Diffstat (limited to 'storage')
551 files changed, 24657 insertions, 14872 deletions
diff --git a/storage/archive/CMakeLists.txt b/storage/archive/CMakeLists.txt index 09227a6cc2d..9a1cfe081b6 100644 --- a/storage/archive/CMakeLists.txt +++ b/storage/archive/CMakeLists.txt @@ -20,5 +20,9 @@ INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include ${CMAKE_SOURCE_DIR}/zlib ${CMAKE_SOURCE_DIR}/sql ${CMAKE_SOURCE_DIR}/regex ${CMAKE_SOURCE_DIR}/extra/yassl/include) -ADD_LIBRARY(archive azio.c ha_archive.cc ha_archive.h) -TARGET_LINK_LIBRARIES(archive zlib mysys dbug strings) + +SET(ARCHIVE_SOURCES azio.c ha_archive.cc ha_archive.h) + +IF(NOT SOURCE_SUBLIBS) + ADD_LIBRARY(archive ${ARCHIVE_SOURCES}) +ENDIF(NOT SOURCE_SUBLIBS) diff --git a/storage/archive/archive_reader.c b/storage/archive/archive_reader.c index 1de398625dc..bfc01073161 100644 --- a/storage/archive/archive_reader.c +++ b/storage/archive/archive_reader.c @@ -3,6 +3,8 @@ #include <assert.h> #include <stdio.h> #include <stdarg.h> +#include <m_ctype.h> +#include <m_string.h> #include <my_getopt.h> #include <mysql_version.h> @@ -15,10 +17,12 @@ static void get_options(int *argc,char * * *argv); static void print_version(void); static void usage(void); static const char *opt_tmpdir; -static const char *new_auto_increment_value; +static const char *new_auto_increment; +unsigned long long new_auto_increment_value; static const char *load_default_groups[]= { "archive_reader", 0 }; static char **default_argv; -int opt_check, opt_force, opt_quiet, opt_backup= 0; +int opt_check, opt_force, opt_quiet, opt_backup= 0, opt_extract_frm; +int opt_autoincrement; int main(int argc, char *argv[]) { @@ -40,6 +44,35 @@ int main(int argc, char *argv[]) return 0; } + if (opt_autoincrement) + { + azio_stream writer_handle; + + if (new_auto_increment_value) + { + if (reader_handle.auto_increment >= new_auto_increment_value) + { + printf("Value is lower then current value\n"); + goto end; + } + } + else + { + new_auto_increment_value= reader_handle.auto_increment + 1; + } + + if (!(ret= azopen(&writer_handle, argv[0], O_CREAT|O_RDWR|O_BINARY))) + { + printf("Could not open file for update: %s\n", argv[0]); + goto end; + } + + writer_handle.auto_increment= new_auto_increment_value; + + azclose(&writer_handle); + azflush(&reader_handle, Z_SYNC_FLUSH); + } + printf("Version %u\n", reader_handle.version); if (reader_handle.version > 2) { @@ -53,6 +86,20 @@ int main(int argc, char *argv[]) printf("\tLongest Row %u\n", reader_handle.longest_row); printf("\tShortest Row %u\n", reader_handle.shortest_row); printf("\tState %s\n", ( reader_handle.dirty ? 
"dirty" : "clean")); + printf("\tFRM stored at %u\n", reader_handle.frm_start_pos); + printf("\tComment stored at %u\n", reader_handle.comment_start_pos); + printf("\tData starts at %u\n", (unsigned int)reader_handle.start); + if (reader_handle.frm_start_pos) + printf("\tFRM length %u\n", reader_handle.frm_length); + if (reader_handle.comment_start_pos) + { + char *comment = + (char *) malloc(sizeof(char) * reader_handle.comment_length); + azread_comment(&reader_handle, comment); + printf("\tComment length %u\n\t\t%.*s\n", reader_handle.comment_length, + reader_handle.comment_length, comment); + free(comment); + } } else { @@ -63,7 +110,7 @@ int main(int argc, char *argv[]) if (opt_check) { - byte size_buffer[ARCHIVE_ROW_HEADER_SIZE]; + uchar size_buffer[ARCHIVE_ROW_HEADER_SIZE]; int error; unsigned int x; unsigned int read; @@ -71,7 +118,7 @@ int main(int argc, char *argv[]) unsigned long long row_count= 0; char buffer; - while ((read= azread(&reader_handle, (byte *)size_buffer, + while ((read= azread(&reader_handle, (uchar *)size_buffer, ARCHIVE_ROW_HEADER_SIZE, &error))) { if (error == Z_STREAM_ERROR || (read && read < ARCHIVE_ROW_HEADER_SIZE)) @@ -124,7 +171,7 @@ int main(int argc, char *argv[]) if (opt_backup) { - byte size_buffer[ARCHIVE_ROW_HEADER_SIZE]; + uchar size_buffer[ARCHIVE_ROW_HEADER_SIZE]; int error; unsigned int read; unsigned int row_len; @@ -148,8 +195,25 @@ int main(int argc, char *argv[]) } writer_handle.auto_increment= reader_handle.auto_increment; + if (reader_handle.frm_length) + { + char *ptr; + ptr= (char *)my_malloc(sizeof(char) * reader_handle.frm_length, MYF(0)); + azread_frm(&reader_handle, ptr); + azwrite_frm(&writer_handle, ptr, reader_handle.frm_length); + my_free(ptr, MYF(0)); + } - while ((read= azread(&reader_handle, (byte *)size_buffer, + if (reader_handle.comment_length) + { + char *ptr; + ptr= (char *)my_malloc(sizeof(char) * reader_handle.comment_length, MYF(0)); + azread_comment(&reader_handle, ptr); + azwrite_comment(&writer_handle, ptr, reader_handle.comment_length); + my_free(ptr, MYF(0)); + } + + while ((read= azread(&reader_handle, (uchar *)size_buffer, ARCHIVE_ROW_HEADER_SIZE, &error))) { if (error == Z_STREAM_ERROR || (read && read < ARCHIVE_ROW_HEADER_SIZE)) @@ -192,6 +256,18 @@ int main(int argc, char *argv[]) azclose(&writer_handle); } + if (opt_extract_frm) + { + File frm_file; + char *ptr; + frm_file= my_open(argv[1], O_CREAT|O_RDWR|O_BINARY, MYF(0)); + ptr= (char *)my_malloc(sizeof(char) * reader_handle.frm_length, MYF(0)); + azread_frm(&reader_handle, ptr); + my_write(frm_file, (uchar*) ptr, reader_handle.frm_length, MYF(0)); + my_close(frm_file, MYF(0)); + my_free(ptr, MYF(0)); + } + end: printf("\n"); azclose(&reader_handle); @@ -211,6 +287,9 @@ get_one_option(int optid, case 'c': opt_check= 1; break; + case 'e': + opt_extract_frm= 1; + break; case 'f': opt_force= 1; printf("Not implemented yet\n"); @@ -226,7 +305,11 @@ get_one_option(int optid, printf("Not implemented yet\n"); break; case 'A': - printf("Not implemented yet\n"); + opt_autoincrement= 1; + if (argument) + new_auto_increment_value= strtoull(argument, NULL, 0); + else + new_auto_increment_value= 0; break; case '?': usage(); @@ -257,6 +340,9 @@ static struct my_option my_long_options[] = "Output debug log. 
Often this is 'd:t:o,filename'.", 0, 0, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, #endif + {"extract-frm", 'e', + "Extract the frm file.", + 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"force", 'f', "Restart with -r if there are any errors in the table.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, @@ -268,16 +354,16 @@ static struct my_option my_long_options[] = {"repair", 'r', "Repair a damaged Archive version 3 or above file.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"set-auto-increment", 'A', - "Force auto_increment to start at this or higher value.", - (gptr*) &new_auto_increment_value, - (gptr*) &new_auto_increment_value, + "Force auto_increment to start at this or higher value. If no value is given, then sets the next auto_increment value to the highest used value for the auto key + 1.", + (uchar**) &new_auto_increment, + (uchar**) &new_auto_increment, 0, GET_ULL, OPT_ARG, 0, 0, 0, 0, 0, 0}, {"silent", 's', "Only print errors. One can use two -s to make archive_reader very silent.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"tmpdir", 't', "Path for temporary files.", - (gptr*) &opt_tmpdir, + (uchar**) &opt_tmpdir, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"version", 'V', "Print version and exit.", diff --git a/storage/archive/archive_test.c b/storage/archive/archive_test.c index 2d79b954a1a..a5b2d1dfcc9 100644 --- a/storage/archive/archive_test.c +++ b/storage/archive/archive_test.c @@ -17,11 +17,14 @@ #include <string.h> #include <assert.h> #include <stdio.h> +#include <string.h> #include <my_getopt.h> #include <mysql_version.h> #define ARCHIVE_ROW_HEADER_SIZE 4 +#define COMMENT_STRING "Your bases" +#define FRM_STRING "My bases" #define TEST_FILENAME "test.az" #define TEST_STRING_INIT "YOU don't know about me without you have read a book by the name of The Adventures of Tom Sawyer; but that ain't no matter. That book was made by Mr. Mark Twain, and he told the truth, mainly. There was things which he stretched, but mainly he told the truth. That is nothing. I never seen anybody but lied one time or another, without it was Aunt Polly, or the widow, or maybe Mary. Aunt Polly--Tom's Aunt Polly, she is--and Mary, and the Widow Douglas is all told about in that book, which is mostly a true book, with some stretchers, as I said before. Now the way that the book winds up is this: Tom and me found the money that the robbers hid in the cave, and it made us rich. We got six thousand dollars apiece--all gold. It was an awful sight of money when it was piled up. Well, Judge Thatcher he took it and put it out at interest, and it fetched us a dollar a day apiece all the year round --more than a body could tell what to do with. The Widow Douglas she took me for her son, and allowed she would..." 
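For reference, the --set-auto-increment handling above (the GET_ULL option entry and the case 'A' branch of get_one_option()) treats its value as optional: an explicit argument is parsed with strtoull(), and no argument yields 0, which archive_reader later interprets as "current auto_increment + 1". A minimal standalone sketch of just that parsing rule in plain C, with the option-table plumbing omitted (parse_set_auto_increment is an illustrative name, not part of the my_getopt API):

#include <stdio.h>
#include <stdlib.h>

/* Mirrors the case 'A' branch: an explicit argument selects the new
   auto_increment start; a missing argument means "derive it from the
   file's current auto_increment + 1 once the file has been read". */
static unsigned long long parse_set_auto_increment(const char *argument)
{
  if (argument)
    return strtoull(argument, NULL, 0); /* base 0: decimal, 0x hex, 0 octal */
  return 0;                             /* sentinel: pick current value + 1 later */
}

int main(void)
{
  printf("%llu\n", parse_set_auto_increment("42"));   /* 42 */
  printf("%llu\n", parse_set_auto_increment("0x2a")); /* 42 */
  printf("%llu\n", parse_set_auto_increment(NULL));   /* 0  */
  return 0;
}

Note that the writer path shown earlier refuses to lower the counter: if the requested value is not above reader_handle.auto_increment, archive_reader prints "Value is lower then current value" and skips the update.
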
#define TEST_LOOP_NUM 100 @@ -33,9 +36,9 @@ char test_string[BUFFER_LEN]; -#define TWOGIG 2147483648 -#define FOURGIG 4294967296 -#define EIGHTGIG 8589934592 +#define TWOGIG LL(2147483648) +#define FOURGIG LL(4294967296) +#define EIGHTGIG LL(8589934592) /* prototypes */ int size_test(unsigned long long length, unsigned long long rows_to_test_for); @@ -44,6 +47,7 @@ int size_test(unsigned long long length, unsigned long long rows_to_test_for); int main(int argc, char *argv[]) { unsigned int ret; + char comment_str[10]; int error; unsigned int x; @@ -67,6 +71,19 @@ int main(int argc, char *argv[]) return 0; } + azwrite_comment(&writer_handle, (char *)COMMENT_STRING, + (unsigned int)strlen(COMMENT_STRING)); + azread_comment(&writer_handle, comment_str); + assert(!memcmp(COMMENT_STRING, comment_str, + strlen(COMMENT_STRING))); + + azwrite_frm(&writer_handle, (char *)FRM_STRING, + (unsigned int)strlen(FRM_STRING)); + azread_frm(&writer_handle, comment_str); + assert(!memcmp(FRM_STRING, comment_str, + strlen(FRM_STRING))); + + if (!(ret= azopen(&reader_handle, TEST_FILENAME, O_RDONLY|O_BINARY))) { printf("Could not open test file\n"); @@ -87,6 +104,10 @@ int main(int argc, char *argv[]) } azflush(&writer_handle, Z_SYNC_FLUSH); + azread_comment(&writer_handle, comment_str); + assert(!memcmp(COMMENT_STRING, comment_str, + strlen(COMMENT_STRING))); + /* Lets test that our internal stats are good */ assert(writer_handle.rows == TEST_LOOP_NUM); @@ -94,15 +115,16 @@ int main(int argc, char *argv[]) azflush(&reader_handle, Z_SYNC_FLUSH); assert(reader_handle.rows == TEST_LOOP_NUM); assert(reader_handle.auto_increment == 0); - assert(reader_handle.check_point == 62); + assert(reader_handle.check_point == 96); assert(reader_handle.forced_flushes == 1); + assert(reader_handle.comment_length == 10); assert(reader_handle.dirty == AZ_STATE_SAVED); writer_handle.auto_increment= 4; azflush(&writer_handle, Z_SYNC_FLUSH); assert(writer_handle.rows == TEST_LOOP_NUM); assert(writer_handle.auto_increment == 4); - assert(writer_handle.check_point == 62); + assert(writer_handle.check_point == 96); assert(writer_handle.forced_flushes == 2); assert(writer_handle.dirty == AZ_STATE_SAVED); @@ -181,7 +203,7 @@ int main(int argc, char *argv[]) azflush(&reader_handle, Z_SYNC_FLUSH); assert(reader_handle.rows == 102); assert(reader_handle.auto_increment == 4); - assert(reader_handle.check_point == 1256); + assert(reader_handle.check_point == 1290); assert(reader_handle.forced_flushes == 4); assert(reader_handle.dirty == AZ_STATE_SAVED); @@ -195,14 +217,13 @@ int main(int argc, char *argv[]) azclose(&writer_handle); azclose(&reader_handle); - exit(0); unlink(TEST_FILENAME); /* Start size tests */ printf("About to run 2/4/8 gig tests now, you may want to hit CTRL-C\n"); - size_test(TWOGIG, 2097152); - size_test(FOURGIG, 4194304); - size_test(EIGHTGIG, 8388608); + size_test(TWOGIG, 2088992L); + size_test(FOURGIG, 4177984L); + size_test(EIGHTGIG, 8355968L); return 0; } @@ -212,6 +233,7 @@ int size_test(unsigned long long length, unsigned long long rows_to_test_for) azio_stream writer_handle, reader_handle; unsigned long long write_length; unsigned long long read_length= 0; + unsigned long long count; unsigned int ret; char buffer[BUFFER_LEN]; int error; @@ -222,8 +244,10 @@ int size_test(unsigned long long length, unsigned long long rows_to_test_for) return 0; } - for (write_length= 0; write_length < length ; write_length+= ret) + for (count= 0, write_length= 0; write_length < length ; + write_length+= ret) { + count++; ret= 
azwrite(&writer_handle, test_string, BUFFER_LEN); if (ret != BUFFER_LEN) { @@ -235,7 +259,7 @@ int size_test(unsigned long long length, unsigned long long rows_to_test_for) azflush(&writer_handle, Z_SYNC_FLUSH); } } - assert(write_length == length); + assert(write_length != count * BUFFER_LEN); /* Number of rows time BUFFER_LEN */ azflush(&writer_handle, Z_SYNC_FLUSH); printf("Reading back data\n"); @@ -257,7 +281,7 @@ int size_test(unsigned long long length, unsigned long long rows_to_test_for) } } - assert(read_length == length); + assert(read_length == write_length); assert(writer_handle.rows == rows_to_test_for); azclose(&writer_handle); azclose(&reader_handle); diff --git a/storage/archive/azio.c b/storage/archive/azio.c index fbb180e4604..c04749444cb 100644 --- a/storage/archive/azio.c +++ b/storage/archive/azio.c @@ -19,7 +19,7 @@ static int const gz_magic[2] = {0x1f, 0x8b}; /* gzip magic header */ static int const az_magic[3] = {0xfe, 0x03, 0x01}; /* az magic header */ -/* gzip flag byte */ +/* gzip flag uchar */ #define ASCII_FLAG 0x01 /* bit 0 set: file probably ascii text */ #define HEAD_CRC 0x02 /* bit 1 set: header CRC present */ #define EXTRA_FIELD 0x04 /* bit 2 set: extra field present */ @@ -55,8 +55,8 @@ int az_open (azio_stream *s, const char *path, int Flags, File fd) s->stream.zalloc = (alloc_func)0; s->stream.zfree = (free_func)0; s->stream.opaque = (voidpf)0; - memset(s->inbuf, 0, AZ_BUFSIZE); - memset(s->outbuf, 0, AZ_BUFSIZE); + memset(s->inbuf, 0, AZ_BUFSIZE_READ); + memset(s->outbuf, 0, AZ_BUFSIZE_WRITE); s->stream.next_in = s->inbuf; s->stream.next_out = s->outbuf; s->stream.avail_in = s->stream.avail_out = 0; @@ -109,7 +109,7 @@ int az_open (azio_stream *s, const char *path, int Flags, File fd) return Z_NULL; } } - s->stream.avail_out = AZ_BUFSIZE; + s->stream.avail_out = AZ_BUFSIZE_WRITE; errno = 0; s->file = fd < 0 ? 
my_open(path, Flags, MYF(0)) : fd; @@ -128,14 +128,20 @@ int az_open (azio_stream *s, const char *path, int Flags, File fd) s->longest_row= 0; s->auto_increment= 0; s->check_point= 0; + s->comment_start_pos= 0; + s->comment_length= 0; + s->frm_start_pos= 0; + s->frm_length= 0; s->dirty= 1; /* We create the file dirty */ + s->start = AZHEADER_SIZE + AZMETA_BUFFER_SIZE; write_header(s); my_seek(s->file, 0, MY_SEEK_END, MYF(0)); } else if (s->mode == 'w') { - unsigned char buffer[AZHEADER_SIZE + AZMETA_BUFFER_SIZE]; - my_pread(s->file, buffer, AZHEADER_SIZE + AZMETA_BUFFER_SIZE, 0, MYF(0)); + uchar buffer[AZHEADER_SIZE + AZMETA_BUFFER_SIZE]; + my_pread(s->file, buffer, AZHEADER_SIZE + AZMETA_BUFFER_SIZE, 0, + MYF(0)); read_header(s, buffer); /* skip the .az header */ my_seek(s->file, 0, MY_SEEK_END, MYF(0)); } @@ -153,8 +159,7 @@ void write_header(azio_stream *s) char buffer[AZHEADER_SIZE + AZMETA_BUFFER_SIZE]; char *ptr= buffer; - s->start = AZHEADER_SIZE + AZMETA_BUFFER_SIZE; - s->block_size= AZ_BUFSIZE; + s->block_size= AZ_BUFSIZE_WRITE; s->version = (unsigned char)az_magic[1]; s->minor_version = (unsigned char)az_magic[2]; @@ -167,8 +172,12 @@ void write_header(azio_stream *s) *(ptr + AZ_BLOCK_POS)= (unsigned char)(s->block_size/1024); /* Reserved for block size */ *(ptr + AZ_STRATEGY_POS)= (unsigned char)Z_DEFAULT_STRATEGY; /* Compression Type */ - int4store(ptr + AZ_FRM_POS, 0); /* FRM Block */ + int4store(ptr + AZ_FRM_POS, s->frm_start_pos); /* FRM Block */ + int4store(ptr + AZ_FRM_LENGTH_POS, s->frm_length); /* FRM Block */ + int4store(ptr + AZ_COMMENT_POS, s->comment_start_pos); /* COMMENT Block */ + int4store(ptr + AZ_COMMENT_LENGTH_POS, s->comment_length); /* COMMENT Block */ int4store(ptr + AZ_META_POS, 0); /* Meta Block */ + int4store(ptr + AZ_META_LENGTH_POS, 0); /* Meta Block */ int8store(ptr + AZ_START_POS, (unsigned long long)s->start); /* Start of Data Block Index Block */ int8store(ptr + AZ_ROW_POS, (unsigned long long)s->rows); /* Start of Data Block Index Block */ int8store(ptr + AZ_FLUSH_POS, (unsigned long long)s->forced_flushes); /* Start of Data Block Index Block */ @@ -176,10 +185,13 @@ void write_header(azio_stream *s) int8store(ptr + AZ_AUTOINCREMENT_POS, (unsigned long long)s->auto_increment); /* Start of Data Block Index Block */ int4store(ptr+ AZ_LONGEST_POS , s->longest_row); /* Longest row */ int4store(ptr+ AZ_SHORTEST_POS, s->shortest_row); /* Shorest row */ + int4store(ptr+ AZ_FRM_POS, + AZHEADER_SIZE + AZMETA_BUFFER_SIZE); /* FRM position */ *(ptr + AZ_DIRTY_POS)= (unsigned char)s->dirty; /* Start of Data Block Index Block */ /* Always begin at the begining, and end there as well */ - my_pwrite(s->file, buffer, (uint)s->start, 0, MYF(0)); + my_pwrite(s->file, (uchar*) buffer, AZHEADER_SIZE + AZMETA_BUFFER_SIZE, 0, + MYF(0)); } /* =========================================================================== @@ -213,7 +225,7 @@ int get_byte(s) if (s->stream.avail_in == 0) { errno = 0; - s->stream.avail_in = my_read(s->file, (byte *)s->inbuf, AZ_BUFSIZE, MYF(0)); + s->stream.avail_in = my_read(s->file, (uchar *)s->inbuf, AZ_BUFSIZE_READ, MYF(0)); if (s->stream.avail_in == 0) { s->z_eof = 1; @@ -237,8 +249,8 @@ int get_byte(s) */ void check_header(azio_stream *s) { - int method; /* method byte */ - int flags; /* flags byte */ + int method; /* method uchar */ + int flags; /* flags uchar */ uInt len; int c; @@ -249,7 +261,7 @@ void check_header(azio_stream *s) if (len < 2) { if (len) s->inbuf[0] = s->stream.next_in[0]; errno = 0; - len = (uInt)my_read(s->file, 
(byte *)s->inbuf + len, AZ_BUFSIZE >> len, MYF(0)); + len = (uInt)my_read(s->file, (uchar *)s->inbuf + len, AZ_BUFSIZE_READ >> len, MYF(0)); if (len == 0) s->z_err = Z_ERRNO; s->stream.avail_in += len; s->stream.next_in = s->inbuf; @@ -303,6 +315,8 @@ void check_header(azio_stream *s) buffer[len]= get_byte(s); s->z_err = s->z_eof ? Z_DATA_ERROR : Z_OK; read_header(s, buffer); + for (; len < s->start; len++) + get_byte(s); } else { @@ -326,6 +340,10 @@ void read_header(azio_stream *s, unsigned char *buffer) s->auto_increment= (unsigned long long)uint8korr(buffer + AZ_AUTOINCREMENT_POS); s->longest_row= (unsigned int)uint4korr(buffer + AZ_LONGEST_POS); s->shortest_row= (unsigned int)uint4korr(buffer + AZ_SHORTEST_POS); + s->frm_start_pos= (unsigned int)uint4korr(buffer + AZ_FRM_POS); + s->frm_length= (unsigned int)uint4korr(buffer + AZ_FRM_LENGTH_POS); + s->comment_start_pos= (unsigned int)uint4korr(buffer + AZ_COMMENT_POS); + s->comment_length= (unsigned int)uint4korr(buffer + AZ_COMMENT_LENGTH_POS); s->dirty= (unsigned int)buffer[AZ_DIRTY_POS]; } else @@ -425,7 +443,7 @@ unsigned int ZEXPORT azread ( azio_stream *s, voidp buf, unsigned int len, int * if (s->stream.avail_out > 0) { s->stream.avail_out -= - (uInt)my_read(s->file, (byte *)next_out, s->stream.avail_out, MYF(0)); + (uInt)my_read(s->file, (uchar *)next_out, s->stream.avail_out, MYF(0)); } len -= s->stream.avail_out; s->in += len; @@ -438,7 +456,7 @@ unsigned int ZEXPORT azread ( azio_stream *s, voidp buf, unsigned int len, int * if (s->stream.avail_in == 0 && !s->z_eof) { errno = 0; - s->stream.avail_in = (uInt)my_read(s->file, (byte *)s->inbuf, AZ_BUFSIZE, MYF(0)); + s->stream.avail_in = (uInt)my_read(s->file, (uchar *)s->inbuf, AZ_BUFSIZE_READ, MYF(0)); if (s->stream.avail_in == 0) { s->z_eof = 1; @@ -492,12 +510,11 @@ unsigned int ZEXPORT azread ( azio_stream *s, voidp buf, unsigned int len, int * Writes the given number of uncompressed bytes into the compressed file. azwrite returns the number of bytes actually written (0 in case of error). 
*/ -unsigned int azwrite (azio_stream *s, voidpc buf, unsigned int len) +unsigned int azwrite (azio_stream *s, const voidp buf, unsigned int len) { s->stream.next_in = (Bytef*)buf; s->stream.avail_in = len; - s->rows++; while (s->stream.avail_in != 0) @@ -506,12 +523,13 @@ unsigned int azwrite (azio_stream *s, voidpc buf, unsigned int len) { s->stream.next_out = s->outbuf; - if (my_write(s->file, (byte *)s->outbuf, AZ_BUFSIZE, MYF(0)) != AZ_BUFSIZE) + if (my_write(s->file, (uchar *)s->outbuf, AZ_BUFSIZE_WRITE, + MYF(0)) != AZ_BUFSIZE_WRITE) { s->z_err = Z_ERRNO; break; } - s->stream.avail_out = AZ_BUFSIZE; + s->stream.avail_out = AZ_BUFSIZE_WRITE; } s->in += s->stream.avail_in; s->out += s->stream.avail_out; @@ -540,6 +558,7 @@ int do_flush (azio_stream *s, int flush) { uInt len; int done = 0; + my_off_t afterwrite_pos; if (s == NULL || s->mode != 'w') return Z_STREAM_ERROR; @@ -547,18 +566,18 @@ int do_flush (azio_stream *s, int flush) for (;;) { - len = AZ_BUFSIZE - s->stream.avail_out; + len = AZ_BUFSIZE_WRITE - s->stream.avail_out; if (len != 0) { s->check_point= my_tell(s->file, MYF(0)); - if ((uInt)my_write(s->file, (byte *)s->outbuf, len, MYF(0)) != len) + if ((uInt)my_write(s->file, (uchar *)s->outbuf, len, MYF(0)) != len) { s->z_err = Z_ERRNO; return Z_ERRNO; } s->stream.next_out = s->outbuf; - s->stream.avail_out = AZ_BUFSIZE; + s->stream.avail_out = AZ_BUFSIZE_WRITE; } if (done) break; s->out += s->stream.avail_out; @@ -580,7 +599,10 @@ int do_flush (azio_stream *s, int flush) s->dirty= AZ_STATE_CLEAN; /* Mark it clean, we should be good now */ else s->dirty= AZ_STATE_SAVED; /* Mark it clean, we should be good now */ + + afterwrite_pos= my_tell(s->file, MYF(0)); write_header(s); + my_seek(s->file, afterwrite_pos, SEEK_SET, MYF(0)); return s->z_err == Z_STREAM_END ? Z_OK : s->z_err; } @@ -594,7 +616,8 @@ int ZEXPORT azflush (s, flush) if (s->mode == 'r') { unsigned char buffer[AZHEADER_SIZE + AZMETA_BUFFER_SIZE]; - my_pread(s->file, buffer, AZHEADER_SIZE + AZMETA_BUFFER_SIZE, 0, MYF(0)); + my_pread(s->file, (uchar*) buffer, AZHEADER_SIZE + AZMETA_BUFFER_SIZE, 0, + MYF(0)); read_header(s, buffer); /* skip the .az header */ return Z_OK; @@ -658,8 +681,8 @@ my_off_t azseek (s, offset, whence) /* There was a zmemzero here if inbuf was null -Brian */ while (offset > 0) { - uInt size = AZ_BUFSIZE; - if (offset < AZ_BUFSIZE) size = (uInt)offset; + uInt size = AZ_BUFSIZE_WRITE; + if (offset < AZ_BUFSIZE_WRITE) size = (uInt)offset; size = azwrite(s, s->inbuf, size); if (size == 0) return -1L; @@ -702,8 +725,8 @@ my_off_t azseek (s, offset, whence) } while (offset > 0) { int error; - unsigned int size = AZ_BUFSIZE; - if (offset < AZ_BUFSIZE) size = (int)offset; + unsigned int size = AZ_BUFSIZE_READ; + if (offset < AZ_BUFSIZE_READ) size = (int)offset; size = azread(s, s->outbuf, size, &error); if (error <= 0) return -1L; @@ -730,7 +753,7 @@ my_off_t ZEXPORT aztell (file) void putLong (File file, uLong x) { int n; - byte buffer[1]; + uchar buffer[1]; for (n = 0; n < 4; n++) { @@ -782,3 +805,67 @@ int azclose (azio_stream *s) return destroy(s); } + +/* + Though this was added to support MySQL's FRM file, anything can be + stored in this location. 
+*/ +int azwrite_frm(azio_stream *s, char *blob, unsigned int length) +{ + if (s->mode == 'r') + return 1; + + if (s->rows > 0) + return 1; + + s->frm_start_pos= (uint) s->start; + s->frm_length= length; + s->start+= length; + + my_pwrite(s->file, (uchar*) blob, s->frm_length, s->frm_start_pos, MYF(0)); + + write_header(s); + my_seek(s->file, 0, MY_SEEK_END, MYF(0)); + + return 0; +} + +int azread_frm(azio_stream *s, char *blob) +{ + my_pread(s->file, (uchar*) blob, s->frm_length, s->frm_start_pos, MYF(0)); + + return 0; +} + + +/* + Simple comment field +*/ +int azwrite_comment(azio_stream *s, char *blob, unsigned int length) +{ + if (s->mode == 'r') + return 1; + + if (s->rows > 0) + return 1; + + s->comment_start_pos= (uint) s->start; + s->comment_length= length; + s->start+= length; + + my_pwrite(s->file, (uchar*) blob, s->comment_length, s->comment_start_pos, + MYF(0)); + + write_header(s); + my_seek(s->file, 0, MY_SEEK_END, MYF(0)); + + return 0; +} + +int azread_comment(azio_stream *s, char *blob) +{ + my_pread(s->file, (uchar*) blob, s->comment_length, s->comment_start_pos, + MYF(0)); + + return 0; +} diff --git a/storage/archive/azlib.h b/storage/archive/azlib.h index 804532dfdbf..47772b1c4fe 100644 --- a/storage/archive/azlib.h +++ b/storage/archive/azlib.h @@ -49,9 +49,10 @@ extern "C" { #define AZMETA_BUFFER_SIZE sizeof(unsigned long long) \ + sizeof(unsigned long long) + sizeof(unsigned long long) + sizeof(unsigned long long) \ + sizeof(unsigned int) + sizeof(unsigned int) \ + + sizeof(unsigned int) + sizeof(unsigned int) \ + sizeof(unsigned char) -#define AZHEADER_SIZE 21 +#define AZHEADER_SIZE 29 #define AZ_MAGIC_POS 0 #define AZ_VERSION_POS 1 @@ -59,15 +60,19 @@ extern "C" { #define AZ_BLOCK_POS 3 #define AZ_STRATEGY_POS 4 #define AZ_FRM_POS 5 -#define AZ_META_POS 9 -#define AZ_START_POS 13 -#define AZ_ROW_POS 21 -#define AZ_FLUSH_POS 29 -#define AZ_CHECK_POS 37 -#define AZ_AUTOINCREMENT_POS 45 -#define AZ_LONGEST_POS 53 -#define AZ_SHORTEST_POS 57 -#define AZ_DIRTY_POS 61 +#define AZ_FRM_LENGTH_POS 9 +#define AZ_META_POS 13 +#define AZ_META_LENGTH_POS 17 +#define AZ_START_POS 21 +#define AZ_ROW_POS 29 +#define AZ_FLUSH_POS 37 +#define AZ_CHECK_POS 45 +#define AZ_AUTOINCREMENT_POS 53 +#define AZ_LONGEST_POS 61 +#define AZ_SHORTEST_POS 65 +#define AZ_COMMENT_POS 69 +#define AZ_COMMENT_LENGTH_POS 73 +#define AZ_DIRTY_POS 77 /* @@ -191,7 +196,8 @@ extern "C" { /* The deflate compression method (the only one supported in this version) */ #define Z_NULL 0 /* for initializing zalloc, zfree, opaque */ -#define AZ_BUFSIZE 16384 +#define AZ_BUFSIZE_READ 32768 +#define AZ_BUFSIZE_WRITE 16384 typedef struct azio_stream { @@ -199,8 +205,8 @@ typedef struct azio_stream { int z_err; /* error code for last stream operation */ int z_eof; /* set if end of input file */ File file; /* .gz file */ - Byte inbuf[AZ_BUFSIZE]; /* input buffer */ - Byte outbuf[AZ_BUFSIZE]; /* output buffer */ + Byte inbuf[AZ_BUFSIZE_READ]; /* input buffer */ + Byte outbuf[AZ_BUFSIZE_WRITE]; /* output buffer */ uLong crc; /* crc32 of uncompressed data */ char *msg; /* error message */ int transparent; /* 1 if input file is not a .gz file */ @@ -220,6 +226,10 @@ typedef struct azio_stream { unsigned int longest_row; /* Longest row */ unsigned int shortest_row; /* Shortest row */ unsigned char dirty; /* State of file */ + unsigned int frm_start_pos; /* Position for start of FRM */ + unsigned int frm_length; /* Position for start of FRM */ + unsigned int comment_start_pos; /* Position for start of comment */ + unsigned 
int comment_length; /* Position for start of comment */ } azio_stream; /* basic functions */ @@ -263,7 +273,7 @@ extern unsigned int azread ( azio_stream *s, voidp buf, unsigned int len, int *e gzread returns the number of uncompressed bytes actually read (0 for end of file, -1 for error). */ -extern unsigned int azwrite (azio_stream *s, voidpc buf, unsigned int len); +extern unsigned int azwrite (azio_stream *s, const voidp buf, unsigned int len); /* Writes the given number of uncompressed bytes into the compressed file. azwrite returns the number of uncompressed bytes actually written @@ -322,6 +332,11 @@ extern int azclose(azio_stream *file); error number (see function gzerror below). */ +extern int azwrite_frm (azio_stream *s, char *blob, unsigned int length); +extern int azread_frm (azio_stream *s, char *blob); +extern int azwrite_comment (azio_stream *s, char *blob, unsigned int length); +extern int azread_comment (azio_stream *s, char *blob); + #ifdef __cplusplus } #endif diff --git a/storage/archive/ha_archive.cc b/storage/archive/ha_archive.cc index 1c0fa95e621..6696eac2fbb 100644 --- a/storage/archive/ha_archive.cc +++ b/storage/archive/ha_archive.cc @@ -28,13 +28,13 @@ /* First, if you want to understand storage engines you should look at ha_example.cc and ha_example.h. + This example was written as a test case for a customer who needed a storage engine without indexes that could compress data very well. So, welcome to a completely compressed storage engine. This storage engine only does inserts. No replace, deletes, or updates. All reads are - complete table scans. Compression is done through azip (bzip compresses - better, but only marginally, if someone asks I could add support for - it too, but beaware that it costs a lot more in CPU time then azip). + complete table scans. Compression is done through a combination of packing + and making use of the zlib library We keep a file pointer open for each instance of ha_archive for each read but for writes we keep one open file handle just for that. We flush it @@ -80,38 +80,18 @@ TODO: - Add bzip optional support. Allow users to set compression level. + Allow adjustable block size. Implement versioning, should be easy. Allow for errors, find a way to mark bad rows. Add optional feature so that rows can be flushed at interval (which will cause less compression but may speed up ordered searches). Checkpoint the meta file to allow for faster rebuilds. - Dirty open (right now the meta file is repaired if a crash occured). Option to allow for dirty reads, this would lower the sync calls, which would make inserts a lot faster, but would mean highly arbitrary reads. -Brian */ -/* - Notes on file formats. - The Meta file is layed out as: - check - Just an int of 254 to make sure that the the file we are opening was - never corrupted. - version - The current version of the file format. - rows - This is an unsigned long long which is the number of rows in the data - file. - check point - Reserved for future use - auto increment - MAX value for autoincrement - dirty - Status of the file, whether or not its values are the latest. This - flag is what causes a repair to occur - - The data file: - check - Just an int of 254 to make sure that the the file we are opening was - never corrupted. - version - The current version of the file format. - data - The data is stored in a "row +blobs" format. 
-*/ /* Variables for archive share methods */ pthread_mutex_t archive_mutex; @@ -121,13 +101,6 @@ static HASH archive_open_tables; #define ARZ ".ARZ" // The data file #define ARN ".ARN" // Files used during an optimize call #define ARM ".ARM" // Meta file (deprecated) -/* - uchar + uchar + ulonglong + ulonglong + ulonglong + ulonglong + FN_REFLEN - + uchar -*/ -#define META_BUFFER_SIZE sizeof(uchar) + sizeof(uchar) + sizeof(ulonglong) \ - + sizeof(ulonglong) + sizeof(ulonglong) + sizeof(ulonglong) + FN_REFLEN \ - + sizeof(uchar) /* uchar + uchar @@ -139,6 +112,10 @@ static HASH archive_open_tables; static handler *archive_create_handler(handlerton *hton, TABLE_SHARE *table, MEM_ROOT *mem_root); +int archive_discover(handlerton *hton, THD* thd, const char *db, + const char *name, + uchar **frmblob, + size_t *frmlen); /* Number of rows that will force a bulk insert. @@ -160,11 +137,11 @@ static handler *archive_create_handler(handlerton *hton, /* Used for hash table that tracks open tables. */ -static byte* archive_get_key(ARCHIVE_SHARE *share,uint *length, +static uchar* archive_get_key(ARCHIVE_SHARE *share, size_t *length, my_bool not_used __attribute__((unused))) { *length=share->table_name_length; - return (byte*) share->table_name; + return (uchar*) share->table_name; } @@ -186,10 +163,11 @@ int archive_db_init(void *p) handlerton *archive_hton; archive_hton= (handlerton *)p; - archive_hton->state=SHOW_OPTION_YES; - archive_hton->db_type=DB_TYPE_ARCHIVE_DB; - archive_hton->create=archive_create_handler; - archive_hton->flags=HTON_NO_FLAGS; + archive_hton->state= SHOW_OPTION_YES; + archive_hton->db_type= DB_TYPE_ARCHIVE_DB; + archive_hton->create= archive_create_handler; + archive_hton->flags= HTON_NO_FLAGS; + archive_hton->discover= archive_discover; if (pthread_mutex_init(&archive_mutex, MY_MUTEX_INIT_FAST)) goto error; @@ -233,7 +211,48 @@ ha_archive::ha_archive(handlerton *hton, TABLE_SHARE *table_arg) buffer.set((char *)byte_buffer, IO_SIZE, system_charset_info); /* The size of the offset value we will use for position() */ - ref_length = sizeof(my_off_t); + ref_length= sizeof(my_off_t); + archive_reader_open= FALSE; +} + +int archive_discover(handlerton *hton, THD* thd, const char *db, + const char *name, + uchar **frmblob, + size_t *frmlen) +{ + DBUG_ENTER("archive_discover"); + DBUG_PRINT("archive_discover", ("db: %s, name: %s", db, name)); + azio_stream frm_stream; + char az_file[FN_REFLEN]; + char *frm_ptr; + MY_STAT file_stat; + + fn_format(az_file, name, db, ARZ, MY_REPLACE_EXT | MY_UNPACK_FILENAME); + + if (!(my_stat(az_file, &file_stat, MYF(0)))) + goto err; + + if (!(azopen(&frm_stream, az_file, O_RDONLY|O_BINARY))) + { + if (errno == EROFS || errno == EACCES) + DBUG_RETURN(my_errno= errno); + DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); + } + + if (frm_stream.frm_length == 0) + goto err; + + frm_ptr= (char *)my_malloc(sizeof(char) * frm_stream.frm_length, MYF(0)); + azread_frm(&frm_stream, frm_ptr); + azclose(&frm_stream); + + *frmlen= frm_stream.frm_length; + *frmblob= (uchar*) frm_ptr; + + DBUG_RETURN(0); +err: + my_errno= 0; + DBUG_RETURN(1); } /* @@ -288,10 +307,8 @@ int ha_archive::read_data_header(azio_stream *file_to_read) See ha_example.cc for a longer description. 
*/ -ARCHIVE_SHARE *ha_archive::get_share(const char *table_name, - TABLE *table, int *rc) +ARCHIVE_SHARE *ha_archive::get_share(const char *table_name, int *rc) { - ARCHIVE_SHARE *share; uint length; DBUG_ENTER("ha_archive::get_share"); @@ -299,7 +316,7 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name, length=(uint) strlen(table_name); if (!(share=(ARCHIVE_SHARE*) hash_search(&archive_open_tables, - (byte*) table_name, + (uchar*) table_name, length))) { char *tmp_name; @@ -321,7 +338,7 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name, share->crashed= FALSE; share->archive_write_open= FALSE; fn_format(share->data_file_name, table_name, "", - ARZ,MY_REPLACE_EXT|MY_UNPACK_FILENAME); + ARZ, MY_REPLACE_EXT | MY_UNPACK_FILENAME); strmov(share->table_name, table_name); DBUG_PRINT("ha_archive", ("Data File %s", share->data_file_name)); @@ -345,7 +362,7 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name, share->crashed= archive_tmp.dirty; azclose(&archive_tmp); - VOID(my_hash_insert(&archive_open_tables, (byte*) share)); + VOID(my_hash_insert(&archive_open_tables, (uchar*) share)); thr_lock_init(&share->lock); } share->use_count++; @@ -364,20 +381,21 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name, Free the share. See ha_example.cc for a description. */ -int ha_archive::free_share(ARCHIVE_SHARE *share_to_free) +int ha_archive::free_share() { int rc= 0; DBUG_ENTER("ha_archive::free_share"); - DBUG_PRINT("ha_archive", ("archive table %.*s has %d open handles on entrance", - share_to_free->table_name_length, share_to_free->table_name, - share_to_free->use_count)); + DBUG_PRINT("ha_archive", + ("archive table %.*s has %d open handles on entrance", + share->table_name_length, share->table_name, + share->use_count)); pthread_mutex_lock(&archive_mutex); - if (!--share_to_free->use_count) + if (!--share->use_count) { - hash_delete(&archive_open_tables, (byte*) share_to_free); - thr_lock_delete(&share_to_free->lock); - VOID(pthread_mutex_destroy(&share_to_free->mutex)); + hash_delete(&archive_open_tables, (uchar*) share); + thr_lock_delete(&share->lock); + VOID(pthread_mutex_destroy(&share->mutex)); /* We need to make sure we don't reset the crashed state. If we open a crashed file, wee need to close it as crashed unless @@ -385,12 +403,12 @@ int ha_archive::free_share(ARCHIVE_SHARE *share_to_free) Since we will close the data down after this, we go on and count the flush on close; */ - if (share_to_free->archive_write_open) + if (share->archive_write_open) { - if (azclose(&(share_to_free->archive_write))) + if (azclose(&(share->archive_write))) rc= 1; } - my_free((gptr) share_to_free, MYF(0)); + my_free((uchar*) share, MYF(0)); } pthread_mutex_unlock(&archive_mutex); @@ -418,6 +436,32 @@ int ha_archive::init_archive_writer() } +/* + No locks are required because it is associated with just one handler instance +*/ +int ha_archive::init_archive_reader() +{ + DBUG_ENTER("ha_archive::init_archive_reader"); + /* + It is expensive to open and close the data files and since you can't have + a gzip file that can be both read and written we keep a writer open + that is shared amoung all open tables. + */ + if (!archive_reader_open) + { + if (!(azopen(&archive, share->data_file_name, O_RDONLY|O_BINARY))) + { + DBUG_PRINT("ha_archive", ("Could not open archive read file")); + share->crashed= TRUE; + DBUG_RETURN(1); + } + archive_reader_open= TRUE; + } + + DBUG_RETURN(0); +} + + /* We just implement one additional file extension. 
*/ @@ -445,12 +489,14 @@ int ha_archive::open(const char *name, int mode, uint open_options) DBUG_PRINT("ha_archive", ("archive table was opened for crash: %s", (open_options & HA_OPEN_FOR_REPAIR) ? "yes" : "no")); - share= get_share(name, table, &rc); + share= get_share(name, &rc); if (rc == HA_ERR_CRASHED_ON_USAGE && !(open_options & HA_OPEN_FOR_REPAIR)) { - free_share(share); + /* purecov: begin inspected */ + free_share(); DBUG_RETURN(rc); + /* purecov: end */ } else if (rc == HA_ERR_OUT_OF_MEM) { @@ -459,26 +505,17 @@ int ha_archive::open(const char *name, int mode, uint open_options) DBUG_ASSERT(share); - record_buffer= create_record_buffer(table->s->reclength + ARCHIVE_ROW_HEADER_SIZE); if (!record_buffer) { - free_share(share); + free_share(); DBUG_RETURN(HA_ERR_OUT_OF_MEM); } thr_lock_data_init(&share->lock, &lock, NULL); - DBUG_PRINT("ha_archive", ("archive data_file_name %s", share->data_file_name)); - if (!(azopen(&archive, share->data_file_name, O_RDONLY|O_BINARY))) - { - if (errno == EROFS || errno == EACCES) - DBUG_RETURN(my_errno= errno); - DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); - } - DBUG_PRINT("ha_archive", ("archive table was crashed %s", rc == HA_ERR_CRASHED_ON_USAGE ? "yes" : "no")); if (rc == HA_ERR_CRASHED_ON_USAGE && open_options & HA_OPEN_FOR_REPAIR) @@ -515,10 +552,13 @@ int ha_archive::close(void) destroy_record_buffer(record_buffer); /* First close stream */ - if (azclose(&archive)) - rc= 1; + if (archive_reader_open) + { + if (azclose(&archive)) + rc= 1; + } /* then also close share */ - rc|= free_share(share); + rc|= free_share(); DBUG_RETURN(rc); } @@ -540,6 +580,9 @@ int ha_archive::create(const char *name, TABLE *table_arg, char linkname[FN_REFLEN]; int error; azio_stream create_stream; /* Archive file we are working with */ + File frm_file; /* File handler for readers */ + MY_STAT file_stat; // Stat information for the data file + uchar *frm_ptr; DBUG_ENTER("ha_archive::create"); @@ -578,7 +621,6 @@ int ha_archive::create(const char *name, TABLE *table_arg, MY_REPLACE_EXT | MY_UNPACK_FILENAME); fn_format(linkname, name, "", ARZ, MY_REPLACE_EXT | MY_UNPACK_FILENAME); - //MY_UNPACK_FILENAME | MY_APPEND_EXT); } else { @@ -587,28 +629,63 @@ int ha_archive::create(const char *name, TABLE *table_arg, linkname[0]= 0; } - if (!(azopen(&create_stream, name_buff, O_CREAT|O_RDWR|O_BINARY))) + /* + There is a chance that the file was "discovered". In this case + just use whatever file is there. 
+ */ + if (!(my_stat(name_buff, &file_stat, MYF(0)))) { - error= errno; - goto error2; - } + my_errno= 0; + if (!(azopen(&create_stream, name_buff, O_CREAT|O_RDWR|O_BINARY))) + { + error= errno; + goto error2; + } - if (linkname[0]) - my_symlink(name_buff, linkname, MYF(0)); + if (linkname[0]) + my_symlink(name_buff, linkname, MYF(0)); + fn_format(name_buff, name, "", ".frm", + MY_REPLACE_EXT | MY_UNPACK_FILENAME); + + /* + Here is where we open up the frm and pass it to archive to store + */ + if ((frm_file= my_open(name_buff, O_RDONLY, MYF(0))) > 0) + { + if (!my_fstat(frm_file, &file_stat, MYF(MY_WME))) + { + frm_ptr= (uchar *)my_malloc(sizeof(uchar) * file_stat.st_size, MYF(0)); + if (frm_ptr) + { + my_read(frm_file, frm_ptr, file_stat.st_size, MYF(0)); + azwrite_frm(&create_stream, (char *)frm_ptr, file_stat.st_size); + my_free((uchar*)frm_ptr, MYF(0)); + } + } + my_close(frm_file, MYF(0)); + } + + if (create_info->comment.str) + azwrite_comment(&create_stream, create_info->comment.str, + create_info->comment.length); + + /* + Yes you need to do this, because the starting value + for the autoincrement may not be zero. + */ + create_stream.auto_increment= stats.auto_increment_value; + if (azclose(&create_stream)) + { + error= errno; + goto error2; + } + } + else + my_errno= 0; DBUG_PRINT("ha_archive", ("Creating File %s", name_buff)); DBUG_PRINT("ha_archive", ("Creating Link %s", linkname)); - /* - Yes you need to do this, because the starting value - for the autoincrement may not be zero. - */ - create_stream.auto_increment= stats.auto_increment_value; - if (azclose(&create_stream)) - { - error= errno; - goto error2; - } DBUG_RETURN(0); @@ -622,7 +699,7 @@ error: /* This is where the actual row is written out. */ -int ha_archive::real_write_row(byte *buf, azio_stream *writer) +int ha_archive::real_write_row(uchar *buf, azio_stream *writer) { my_off_t written; unsigned int r_pack_length; @@ -652,7 +729,7 @@ int ha_archive::real_write_row(byte *buf, azio_stream *writer) the bytes required for the length in the header. */ -uint32 ha_archive::max_row_length(const byte *buf) +uint32 ha_archive::max_row_length(const uchar *buf) { uint32 length= (uint32)(table->s->reclength + table->s->fields*2); length+= ARCHIVE_ROW_HEADER_SIZE; @@ -669,9 +746,9 @@ uint32 ha_archive::max_row_length(const byte *buf) } -unsigned int ha_archive::pack_row(byte *record) +unsigned int ha_archive::pack_row(uchar *record) { - byte *ptr; + uchar *ptr; DBUG_ENTER("ha_archive::pack_row"); @@ -686,8 +763,8 @@ unsigned int ha_archive::pack_row(byte *record) for (Field **field=table->field ; *field ; field++) { - ptr=(byte*) (*field)->pack((char*) ptr, - (char*) record + (*field)->offset(record)); + if (!((*field)->is_null())) + ptr= (*field)->pack(ptr, record + (*field)->offset(record)); } int4store(record_buffer->buffer, (int)(ptr - record_buffer->buffer - @@ -709,26 +786,27 @@ unsigned int ha_archive::pack_row(byte *record) for implementing start_bulk_insert() is that we could skip setting dirty to true each time. 
*/ -int ha_archive::write_row(byte *buf) +int ha_archive::write_row(uchar *buf) { int rc; - byte *read_buf= NULL; + uchar *read_buf= NULL; ulonglong temp_auto; - byte *record= table->record[0]; + uchar *record= table->record[0]; DBUG_ENTER("ha_archive::write_row"); if (share->crashed) DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); - if (!share->archive_write_open) - if (init_archive_writer()) - DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); - ha_statistic_increment(&SSV::ha_write_count); if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT) table->timestamp_field->set_time(); pthread_mutex_lock(&share->mutex); + if (!share->archive_write_open) + if (init_archive_writer()) + DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); + + if (table->next_number_field && record == table->record[0]) { KEY *mkey= &table->s->key_info[0]; // We only support one key right now @@ -736,29 +814,28 @@ int ha_archive::write_row(byte *buf) temp_auto= table->next_number_field->val_int(); /* - Simple optimization to see if we fail for duplicate key immediatly - because we have just given out this value. + We don't support decremening auto_increment. They make the performance + just cry. */ - if (temp_auto == share->archive_write.auto_increment && + if (temp_auto <= share->archive_write.auto_increment && mkey->flags & HA_NOSAME) { rc= HA_ERR_FOUND_DUPP_KEY; goto error; } +#ifdef DEAD_CODE /* Bad news, this will cause a search for the unique value which is very expensive since we will have to do a table scan which will lock up all other writers during this period. This could perhaps be optimized in the future. */ - if (temp_auto < share->archive_write.auto_increment && - mkey->flags & HA_NOSAME) { /* First we create a buffer that we can use for reading rows, and can pass to get_row(). */ - if (!(read_buf= (byte*) my_malloc(table->s->reclength, MYF(MY_WME)))) + if (!(read_buf= (uchar*) my_malloc(table->s->reclength, MYF(MY_WME)))) { rc= HA_ERR_OUT_OF_MEM; goto error; @@ -783,13 +860,14 @@ int ha_archive::write_row(byte *buf) { if (!memcmp(read_buf + mfield->offset(record), table->next_number_field->ptr, - mfield->max_length())) + mfield->max_display_length())) { rc= HA_ERR_FOUND_DUPP_KEY; goto error; } } } +#endif else { if (temp_auto > share->archive_write.auto_increment) @@ -807,7 +885,7 @@ int ha_archive::write_row(byte *buf) error: pthread_mutex_unlock(&share->mutex); if (read_buf) - my_free((gptr) read_buf, MYF(0)); + my_free((uchar*) read_buf, MYF(0)); DBUG_RETURN(rc); } @@ -835,7 +913,7 @@ int ha_archive::index_init(uint keynr, bool sorted) No indexes, so if we get a request for an index search since we tell the optimizer that we have unique indexes, we scan */ -int ha_archive::index_read(byte *buf, const byte *key, +int ha_archive::index_read(uchar *buf, const uchar *key, uint key_len, enum ha_rkey_function find_flag) { int rc; @@ -845,10 +923,10 @@ int ha_archive::index_read(byte *buf, const byte *key, } -int ha_archive::index_read_idx(byte *buf, uint index, const byte *key, +int ha_archive::index_read_idx(uchar *buf, uint index, const uchar *key, uint key_len, enum ha_rkey_function find_flag) { - int rc= 0; + int rc; bool found= 0; KEY *mkey= &table->s->key_info[index]; current_k_offset= mkey->key_part->offset; @@ -858,22 +936,10 @@ int ha_archive::index_read_idx(byte *buf, uint index, const byte *key, DBUG_ENTER("ha_archive::index_read_idx"); - /* - All of the buffer must be written out or we won't see all of the - data - */ - pthread_mutex_lock(&share->mutex); - azflush(&(share->archive_write), Z_SYNC_FLUSH); - 
pthread_mutex_unlock(&share->mutex); + rc= rnd_init(TRUE); - /* - Set the position of the local read thread to the beginning postion. - */ - if (read_data_header(&archive)) - { - rc= HA_ERR_CRASHED_ON_USAGE; + if (rc) goto error; - } while (!(get_row(&archive, buf))) { @@ -892,7 +958,7 @@ error: } -int ha_archive::index_next(byte * buf) +int ha_archive::index_next(uchar * buf) { bool found= 0; @@ -923,29 +989,13 @@ int ha_archive::rnd_init(bool scan) if (share->crashed) DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); + init_archive_reader(); + /* We rewind the file so that we can read from the beginning if scan */ if (scan) { - scan_rows= share->rows_recorded; DBUG_PRINT("info", ("archive will retrieve %llu rows", (unsigned long long) scan_rows)); - stats.records= 0; - - /* - If dirty, we lock, and then reset/flush the data. - I found that just calling azflush() doesn't always work. - */ - if (share->dirty == TRUE) - { - pthread_mutex_lock(&share->mutex); - if (share->dirty == TRUE) - { - DBUG_PRINT("ha_archive", ("archive flushing out rows for scan")); - azflush(&(share->archive_write), Z_SYNC_FLUSH); - share->dirty= FALSE; - } - pthread_mutex_unlock(&share->mutex); - } if (read_data_header(&archive)) DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); @@ -959,7 +1009,7 @@ int ha_archive::rnd_init(bool scan) This is the method that is used to read a row. It assumes that the row is positioned where you want it. */ -int ha_archive::get_row(azio_stream *file_to_read, byte *buf) +int ha_archive::get_row(azio_stream *file_to_read, uchar *buf) { int rc; DBUG_ENTER("ha_archive::get_row"); @@ -986,8 +1036,8 @@ bool ha_archive::fix_rec_buff(unsigned int length) if (length > record_buffer->length) { - byte *newptr; - if (!(newptr=(byte*) my_realloc((gptr) record_buffer->buffer, + uchar *newptr; + if (!(newptr=(uchar*) my_realloc((uchar*) record_buffer->buffer, length, MYF(MY_ALLOW_ZERO_PTR)))) DBUG_RETURN(1); @@ -1000,17 +1050,17 @@ bool ha_archive::fix_rec_buff(unsigned int length) DBUG_RETURN(0); } -int ha_archive::unpack_row(azio_stream *file_to_read, byte *record) +int ha_archive::unpack_row(azio_stream *file_to_read, uchar *record) { DBUG_ENTER("ha_archive::unpack_row"); unsigned int read; int error; - byte size_buffer[ARCHIVE_ROW_HEADER_SIZE]; + uchar size_buffer[ARCHIVE_ROW_HEADER_SIZE]; unsigned int row_len; /* First we grab the length stored */ - read= azread(file_to_read, (byte *)size_buffer, ARCHIVE_ROW_HEADER_SIZE, &error); + read= azread(file_to_read, size_buffer, ARCHIVE_ROW_HEADER_SIZE, &error); if (error == Z_STREAM_ERROR || (read && read < ARCHIVE_ROW_HEADER_SIZE)) DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); @@ -1035,17 +1085,21 @@ int ha_archive::unpack_row(azio_stream *file_to_read, byte *record) } /* Copy null bits */ - const char *ptr= (const char*) record_buffer->buffer; + const uchar *ptr= record_buffer->buffer; memcpy(record, ptr, table->s->null_bytes); ptr+= table->s->null_bytes; for (Field **field=table->field ; *field ; field++) - ptr= (*field)->unpack((char *)record + (*field)->offset(table->record[0]), ptr); - + { + if (!((*field)->is_null())) + { + ptr= (*field)->unpack(record + (*field)->offset(table->record[0]), ptr); + } + } DBUG_RETURN(0); } -int ha_archive::get_row_version3(azio_stream *file_to_read, byte *buf) +int ha_archive::get_row_version3(azio_stream *file_to_read, uchar *buf) { DBUG_ENTER("ha_archive::get_row_version3"); @@ -1055,7 +1109,7 @@ int ha_archive::get_row_version3(azio_stream *file_to_read, byte *buf) } -int ha_archive::get_row_version2(azio_stream *file_to_read, byte *buf) 
+int ha_archive::get_row_version2(azio_stream *file_to_read, uchar *buf) { unsigned int read; int error; @@ -1121,7 +1175,7 @@ int ha_archive::get_row_version2(azio_stream *file_to_read, byte *buf) if ((size_t) read != size) DBUG_RETURN(HA_ERR_END_OF_FILE); - ((Field_blob*) table->field[*ptr])->set_ptr(size, last); + ((Field_blob*) table->field[*ptr])->set_ptr(size, (uchar*) last); last += size; } else @@ -1139,7 +1193,7 @@ int ha_archive::get_row_version2(azio_stream *file_to_read, byte *buf) or by having had ha_archive::rnd_pos() called before it is called. */ -int ha_archive::rnd_next(byte *buf) +int ha_archive::rnd_next(uchar *buf) { int rc; DBUG_ENTER("ha_archive::rnd_next"); @@ -1155,9 +1209,7 @@ int ha_archive::rnd_next(byte *buf) current_position= aztell(&archive); rc= get_row(&archive, buf); - - if (rc != HA_ERR_END_OF_FILE) - stats.records++; + table->status=rc ? STATUS_NOT_FOUND: 0; DBUG_RETURN(rc); } @@ -1169,7 +1221,7 @@ int ha_archive::rnd_next(byte *buf) needed. */ -void ha_archive::position(const byte *record) +void ha_archive::position(const uchar *record) { DBUG_ENTER("ha_archive::position"); my_store_ptr(ref, ref_length, current_position); @@ -1184,7 +1236,7 @@ void ha_archive::position(const byte *record) correctly ordered row. */ -int ha_archive::rnd_pos(byte * buf, byte *pos) +int ha_archive::rnd_pos(uchar * buf, uchar *pos) { DBUG_ENTER("ha_archive::rnd_pos"); ha_statistic_increment(&SSV::ha_read_rnd_next_count); @@ -1223,6 +1275,8 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt) azio_stream writer; char writer_filename[FN_REFLEN]; + init_archive_reader(); + // now we close both our writer and our reader for the rename if (share->archive_write_open) { @@ -1232,7 +1286,7 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt) /* Lets create a file to contain the new data */ fn_format(writer_filename, share->table_name, "", ARN, - MY_REPLACE_EXT|MY_UNPACK_FILENAME); + MY_REPLACE_EXT | MY_UNPACK_FILENAME); if (!(azopen(&writer, writer_filename, O_CREAT|O_RDWR|O_BINARY))) DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); @@ -1275,8 +1329,8 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt) { Field *field= table->found_next_number_field; ulonglong auto_value= - (ulonglong) field->val_int((char*)(table->record[0] + - field->offset(table->record[0]))); + (ulonglong) field->val_int(table->record[0] + + field->offset(table->record[0])); if (share->archive_write.auto_increment < auto_value) stats.auto_increment_value= share->archive_write.auto_increment= auto_value; @@ -1364,7 +1418,7 @@ void ha_archive::update_create_info(HA_CREATE_INFO *create_info) DBUG_ENTER("ha_archive::update_create_info"); ha_archive::info(HA_STATUS_AUTO); - if (!(create_info->used_fields & HA_CREATE_USED_AUTO)) + if (create_info->used_fields & HA_CREATE_USED_AUTO) { /* Internally Archive keeps track of last used, not next used. @@ -1391,12 +1445,33 @@ void ha_archive::update_create_info(HA_CREATE_INFO *create_info) int ha_archive::info(uint flag) { DBUG_ENTER("ha_archive::info"); + + /* + If dirty, we lock, and then reset/flush the data. + I found that just calling azflush() doesn't always work. + */ + pthread_mutex_lock(&share->mutex); + if (share->dirty == TRUE) + { + if (share->dirty == TRUE) + { + DBUG_PRINT("ha_archive", ("archive flushing out rows for scan")); + azflush(&(share->archive_write), Z_SYNC_FLUSH); + share->dirty= FALSE; + } + } + /* This should be an accurate number now, though bulk and delayed inserts can cause the number to be inaccurate. 
*/ stats.records= share->rows_recorded; + pthread_mutex_unlock(&share->mutex); + + scan_rows= stats.records; stats.deleted= 0; + + DBUG_PRINT("ha_archive", ("Stats rows is %d\n", (int)stats.records)); /* Costs quite a bit more to get all information */ if (flag & HA_STATUS_TIME) { @@ -1415,7 +1490,10 @@ int ha_archive::info(uint flag) if (flag & HA_STATUS_AUTO) { + init_archive_reader(); + pthread_mutex_lock(&share->mutex); azflush(&archive, Z_SYNC_FLUSH); + pthread_mutex_unlock(&share->mutex); stats.auto_increment_value= archive.auto_increment; } @@ -1477,34 +1555,24 @@ bool ha_archive::is_crashed() const int ha_archive::check(THD* thd, HA_CHECK_OPT* check_opt) { int rc= 0; - byte *buf; const char *old_proc_info; ha_rows count= share->rows_recorded; DBUG_ENTER("ha_archive::check"); old_proc_info= thd_proc_info(thd, "Checking table"); /* Flush any waiting data */ + pthread_mutex_lock(&share->mutex); azflush(&(share->archive_write), Z_SYNC_FLUSH); - - /* - First we create a buffer that we can use for reading rows, and can pass - to get_row(). - */ - if (!(buf= (byte*) my_malloc(table->s->reclength, MYF(MY_WME)))) - rc= HA_ERR_OUT_OF_MEM; + pthread_mutex_unlock(&share->mutex); /* Now we will rewind the archive file so that we are positioned at the start of the file. */ - if (!rc) - read_data_header(&archive); - - if (!rc) - while (!(rc= get_row(&archive, buf))) - count--; - - my_free((char*)buf, MYF(0)); + init_archive_reader(); + read_data_header(&archive); + while (!(rc= get_row(&archive, table->record[0]))) + count--; thd_proc_info(thd, old_proc_info); @@ -1544,7 +1612,7 @@ archive_record_buffer *ha_archive::create_record_buffer(unsigned int length) } r->length= (int)length; - if (!(r->buffer= (byte*) my_malloc(r->length, + if (!(r->buffer= (uchar*) my_malloc(r->length, MYF(MY_WME)))) { my_free((char*) r, MYF(MY_ALLOW_ZERO_PTR)); diff --git a/storage/archive/ha_archive.h b/storage/archive/ha_archive.h index 638f0db71d0..ab630ed22fd 100644 --- a/storage/archive/ha_archive.h +++ b/storage/archive/ha_archive.h @@ -27,7 +27,7 @@ */ typedef struct st_archive_record_buffer { - byte *buffer; + uchar *buffer; uint32 length; } archive_record_buffer; @@ -62,15 +62,16 @@ class ha_archive: public handler azio_stream archive; /* Archive file we are working with */ my_off_t current_position; /* The position of the row we just read */ - byte byte_buffer[IO_SIZE]; /* Initial buffer for our string */ + uchar byte_buffer[IO_SIZE]; /* Initial buffer for our string */ String buffer; /* Buffer used for blob storage */ ha_rows scan_rows; /* Number of rows left in scan */ bool delayed_insert; /* If the insert is delayed */ bool bulk_insert; /* If we are performing a bulk insert */ - const byte *current_key; + const uchar *current_key; uint current_key_len; uint current_k_offset; archive_record_buffer *record_buffer; + bool archive_reader_open; archive_record_buffer *create_record_buffer(unsigned int length); void destroy_record_buffer(archive_record_buffer *r); @@ -86,6 +87,9 @@ public: ulonglong table_flags() const { return (HA_NO_TRANSACTIONS | HA_REC_NOT_IN_SEQ | HA_CAN_BIT_FIELD | + HA_BINLOG_ROW_CAPABLE | HA_BINLOG_STMT_CAPABLE | + HA_STATS_RECORDS_IS_EXACT | + HA_HAS_RECORDS | HA_FILE_BASED | HA_CAN_INSERT_DELAYED | HA_CAN_GEOMETRY); } ulong index_flags(uint idx, uint part, bool all_parts) const @@ -99,29 +103,31 @@ public: uint max_supported_keys() const { return 1; } uint max_supported_key_length() const { return sizeof(ulonglong); } uint max_supported_key_part_length() const { return sizeof(ulonglong); 
} + ha_rows records() { return share->rows_recorded; } int index_init(uint keynr, bool sorted); - virtual int index_read(byte * buf, const byte * key, + virtual int index_read(uchar * buf, const uchar * key, uint key_len, enum ha_rkey_function find_flag); - virtual int index_read_idx(byte * buf, uint index, const byte * key, + virtual int index_read_idx(uchar * buf, uint index, const uchar * key, uint key_len, enum ha_rkey_function find_flag); - int index_next(byte * buf); + int index_next(uchar * buf); int open(const char *name, int mode, uint test_if_locked); int close(void); - int write_row(byte * buf); - int real_write_row(byte *buf, azio_stream *writer); + int write_row(uchar * buf); + int real_write_row(uchar *buf, azio_stream *writer); int delete_all_rows(); int rnd_init(bool scan=1); - int rnd_next(byte *buf); - int rnd_pos(byte * buf, byte *pos); - int get_row(azio_stream *file_to_read, byte *buf); - int get_row_version2(azio_stream *file_to_read, byte *buf); - int get_row_version3(azio_stream *file_to_read, byte *buf); - ARCHIVE_SHARE *get_share(const char *table_name, TABLE *table, int *rc); - int free_share(ARCHIVE_SHARE *share); + int rnd_next(uchar *buf); + int rnd_pos(uchar * buf, uchar *pos); + int get_row(azio_stream *file_to_read, uchar *buf); + int get_row_version2(azio_stream *file_to_read, uchar *buf); + int get_row_version3(azio_stream *file_to_read, uchar *buf); + ARCHIVE_SHARE *get_share(const char *table_name, int *rc); + int free_share(); int init_archive_writer(); + int init_archive_reader(); bool auto_repair() const { return 1; } // For the moment we just do this int read_data_header(azio_stream *file_to_read); - void position(const byte *record); + void position(const uchar *record); int info(uint); void update_create_info(HA_CREATE_INFO *create_info); int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info); @@ -138,9 +144,9 @@ public: bool is_crashed() const; int check(THD* thd, HA_CHECK_OPT* check_opt); bool check_and_repair(THD *thd); - uint32 max_row_length(const byte *buf); + uint32 max_row_length(const uchar *buf); bool fix_rec_buff(unsigned int length); - int unpack_row(azio_stream *file_to_read, byte *record); - unsigned int pack_row(byte *record); + int unpack_row(azio_stream *file_to_read, uchar *record); + unsigned int pack_row(uchar *record); }; diff --git a/storage/blackhole/CMakeLists.txt b/storage/blackhole/CMakeLists.txt index 6b02e1effa9..9b6dd7adac9 100644 --- a/storage/blackhole/CMakeLists.txt +++ b/storage/blackhole/CMakeLists.txt @@ -19,4 +19,9 @@ SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX") INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include ${CMAKE_SOURCE_DIR}/sql ${CMAKE_SOURCE_DIR}/regex ${CMAKE_SOURCE_DIR}/extra/yassl/include) -ADD_LIBRARY(blackhole ha_blackhole.cc ha_blackhole.h) + +SET(BLACKHOLE_SOURCES ha_blackhole.cc ha_blackhole.h) + +IF(NOT SOURCE_SUBLIBS) + ADD_LIBRARY(blackhole ${BLACKHOLE_SOURCES}) +ENDIF(NOT SOURCE_SUBLIBS) diff --git a/storage/blackhole/ha_blackhole.cc b/storage/blackhole/ha_blackhole.cc index 7bdb4e40b3d..4e12e9f0ee7 100644 --- a/storage/blackhole/ha_blackhole.cc +++ b/storage/blackhole/ha_blackhole.cc @@ -31,6 +31,14 @@ static handler *blackhole_create_handler(handlerton *hton, } +/* Static declarations for shared structures */ + +static pthread_mutex_t blackhole_mutex; +static HASH blackhole_open_tables; + +static st_blackhole_share *get_share(const char *table_name); +static void free_share(st_blackhole_share *share); + 
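The static declarations just added above (blackhole_mutex, blackhole_open_tables, get_share(), free_share()) introduce the usual refcounted table-share pattern; the actual implementations appear further down in this file's diff. As a rough standalone sketch of the idea, assuming a plain linked list and pthreads in place of MySQL's HASH and THR_LOCK machinery (all names below are illustrative, not the MySQL API):

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

/* Simplified stand-in for st_blackhole_share: one refcounted entry per
   open table name, shared by all handler instances of that table. */
typedef struct share {
  struct share *next;
  unsigned use_count;
  char table_name[1];                /* over-allocated past the struct */
} share;

static pthread_mutex_t registry_mutex= PTHREAD_MUTEX_INITIALIZER;
static share *registry;              /* linked list instead of HASH */

static share *get_share(const char *table_name)
{
  share *s;
  size_t len= strlen(table_name);

  pthread_mutex_lock(&registry_mutex);
  for (s= registry; s; s= s->next)            /* hash_search() analogue */
    if (!strcmp(s->table_name, table_name))
      break;
  if (!s && (s= calloc(1, sizeof(share) + len)))
  {
    memcpy(s->table_name, table_name, len + 1);
    s->next= registry;                        /* my_hash_insert() analogue */
    registry= s;
  }
  if (s)
    s->use_count++;
  pthread_mutex_unlock(&registry_mutex);
  return s;                                   /* NULL == out of memory */
}

static void free_share(share *s)
{
  pthread_mutex_lock(&registry_mutex);
  if (!--s->use_count)                        /* last close frees the entry */
  {
    share **p;
    for (p= &registry; *p != s; p= &(*p)->next)
      ;
    *p= s->next;
    free(s);
  }
  pthread_mutex_unlock(&registry_mutex);
}

The real code additionally keeps a THR_LOCK inside the share, so every handler instance of the same table initializes its lock data against one shared lock, which is exactly what the new ha_blackhole::open() does with thr_lock_data_init().
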
/***************************************************************************** ** BLACKHOLE tables *****************************************************************************/ @@ -53,15 +61,18 @@ const char **ha_blackhole::bas_ext() const int ha_blackhole::open(const char *name, int mode, uint test_if_locked) { DBUG_ENTER("ha_blackhole::open"); - thr_lock_init(&thr_lock); - thr_lock_data_init(&thr_lock,&lock,NULL); + + if (!(share= get_share(name))) + DBUG_RETURN(HA_ERR_OUT_OF_MEM); + + thr_lock_data_init(&share->lock, &lock, NULL); DBUG_RETURN(0); } int ha_blackhole::close(void) { DBUG_ENTER("ha_blackhole::close"); - thr_lock_delete(&thr_lock); + free_share(share); DBUG_RETURN(0); } @@ -83,7 +94,7 @@ const char *ha_blackhole::index_type(uint key_number) HA_KEY_ALG_RTREE) ? "RTREE" : "BTREE"); } -int ha_blackhole::write_row(byte * buf) +int ha_blackhole::write_row(uchar * buf) { DBUG_ENTER("ha_blackhole::write_row"); DBUG_RETURN(0); @@ -96,14 +107,14 @@ int ha_blackhole::rnd_init(bool scan) } -int ha_blackhole::rnd_next(byte *buf) +int ha_blackhole::rnd_next(uchar *buf) { DBUG_ENTER("ha_blackhole::rnd_next"); DBUG_RETURN(HA_ERR_END_OF_FILE); } -int ha_blackhole::rnd_pos(byte * buf, byte *pos) +int ha_blackhole::rnd_pos(uchar * buf, uchar *pos) { DBUG_ENTER("ha_blackhole::rnd_pos"); DBUG_ASSERT(0); @@ -111,7 +122,7 @@ int ha_blackhole::rnd_pos(byte * buf, byte *pos) } -void ha_blackhole::position(const byte *record) +void ha_blackhole::position(const uchar *record) { DBUG_ENTER("ha_blackhole::position"); DBUG_ASSERT(0); @@ -136,71 +147,153 @@ int ha_blackhole::external_lock(THD *thd, int lock_type) } -uint ha_blackhole::lock_count(void) const -{ - DBUG_ENTER("ha_blackhole::lock_count"); - DBUG_RETURN(0); -} - THR_LOCK_DATA **ha_blackhole::store_lock(THD *thd, THR_LOCK_DATA **to, enum thr_lock_type lock_type) { DBUG_ENTER("ha_blackhole::store_lock"); + if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK) + { + /* + Here is where we get into the guts of a row level lock. + If TL_UNLOCK is set + If we are not doing a LOCK TABLE or DISCARD/IMPORT + TABLESPACE, then allow multiple writers + */ + + if ((lock_type >= TL_WRITE_CONCURRENT_INSERT && + lock_type <= TL_WRITE) && !thd_in_lock_tables(thd) + && !thd_tablespace_op(thd)) + lock_type = TL_WRITE_ALLOW_WRITE; + + /* + In queries of type INSERT INTO t1 SELECT ... FROM t2 ... + MySQL would use the lock TL_READ_NO_INSERT on t2, and that + would conflict with TL_WRITE_ALLOW_WRITE, blocking all inserts + to t2. Convert the lock to a normal read lock to allow + concurrent inserts to t2. 
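The hunk continues below with the code that applies this rule inside ha_blackhole::store_lock(). Reduced to a pure decision function, the downgrade logic looks roughly like this; the enum is an illustrative slice of MySQL's thr_lock_type in ascending strength, not the full real definition:

/* Illustrative lock levels, ordered like the relevant part of
   thr_lock_type (the real enum has more members). */
enum LockType
{
  TL_READ,
  TL_READ_NO_INSERT,
  TL_WRITE_ALLOW_WRITE,
  TL_WRITE_CONCURRENT_INSERT,
  TL_WRITE
};

/*
  Outside LOCK TABLES and DISCARD/IMPORT TABLESPACE, strong write
  locks relax to TL_WRITE_ALLOW_WRITE so several writers may proceed,
  and TL_READ_NO_INSERT relaxes to a plain read so that concurrent
  inserts are not blocked.
*/
LockType adjust_lock(LockType requested, bool in_lock_tables,
                     bool tablespace_op)
{
  if (requested >= TL_WRITE_CONCURRENT_INSERT && requested <= TL_WRITE &&
      !in_lock_tables && !tablespace_op)
    return TL_WRITE_ALLOW_WRITE;
  if (requested == TL_READ_NO_INSERT && !in_lock_tables)
    return TL_READ;
  return requested;
}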
+ */ + + if (lock_type == TL_READ_NO_INSERT && !thd_in_lock_tables(thd)) + lock_type = TL_READ; + + lock.type= lock_type; + } + *to++= &lock; DBUG_RETURN(to); } -int ha_blackhole::index_read(byte * buf, const byte * key, - uint key_len, enum ha_rkey_function find_flag) +int ha_blackhole::index_read_map(uchar * buf, const uchar * key, + key_part_map keypart_map, + enum ha_rkey_function find_flag) { DBUG_ENTER("ha_blackhole::index_read"); - DBUG_RETURN(0); + DBUG_RETURN(HA_ERR_END_OF_FILE); } -int ha_blackhole::index_read_idx(byte * buf, uint idx, const byte * key, - uint key_len, enum ha_rkey_function find_flag) +int ha_blackhole::index_read_idx_map(uchar * buf, uint idx, const uchar * key, + key_part_map keypart_map, + enum ha_rkey_function find_flag) { DBUG_ENTER("ha_blackhole::index_read_idx"); DBUG_RETURN(HA_ERR_END_OF_FILE); } -int ha_blackhole::index_read_last(byte * buf, const byte * key, uint key_len) +int ha_blackhole::index_read_last_map(uchar * buf, const uchar * key, + key_part_map keypart_map) { DBUG_ENTER("ha_blackhole::index_read_last"); DBUG_RETURN(HA_ERR_END_OF_FILE); } -int ha_blackhole::index_next(byte * buf) +int ha_blackhole::index_next(uchar * buf) { DBUG_ENTER("ha_blackhole::index_next"); DBUG_RETURN(HA_ERR_END_OF_FILE); } -int ha_blackhole::index_prev(byte * buf) +int ha_blackhole::index_prev(uchar * buf) { DBUG_ENTER("ha_blackhole::index_prev"); DBUG_RETURN(HA_ERR_END_OF_FILE); } -int ha_blackhole::index_first(byte * buf) +int ha_blackhole::index_first(uchar * buf) { DBUG_ENTER("ha_blackhole::index_first"); DBUG_RETURN(HA_ERR_END_OF_FILE); } -int ha_blackhole::index_last(byte * buf) +int ha_blackhole::index_last(uchar * buf) { DBUG_ENTER("ha_blackhole::index_last"); DBUG_RETURN(HA_ERR_END_OF_FILE); } + +static st_blackhole_share *get_share(const char *table_name) +{ + st_blackhole_share *share; + uint length; + + length= (uint) strlen(table_name); + pthread_mutex_lock(&blackhole_mutex); + + if (!(share= (st_blackhole_share*) hash_search(&blackhole_open_tables, + (uchar*) table_name, length))) + { + if (!(share= (st_blackhole_share*) my_malloc(sizeof(st_blackhole_share) + + length, + MYF(MY_WME | MY_ZEROFILL)))) + goto error; + + share->table_name_length= length; + strmov(share->table_name, table_name); + + if (my_hash_insert(&blackhole_open_tables, (uchar*) share)) + { + my_free((uchar*) share, MYF(0)); + share= NULL; + goto error; + } + + thr_lock_init(&share->lock); + } + share->use_count++; + +error: + pthread_mutex_unlock(&blackhole_mutex); + return share; +} + +static void free_share(st_blackhole_share *share) +{ + pthread_mutex_lock(&blackhole_mutex); + if (!--share->use_count) + hash_delete(&blackhole_open_tables, (uchar*) share); + pthread_mutex_unlock(&blackhole_mutex); +} + +static void blackhole_free_key(st_blackhole_share *share) +{ + thr_lock_delete(&share->lock); + my_free((uchar*) share, MYF(0)); +} + +static uchar* blackhole_get_key(st_blackhole_share *share, size_t *length, + my_bool not_used __attribute__((unused))) +{ + *length= share->table_name_length; + return (uchar*) share->table_name; +} + static int blackhole_init(void *p) { handlerton *blackhole_hton; @@ -209,6 +302,20 @@ static int blackhole_init(void *p) blackhole_hton->db_type= DB_TYPE_BLACKHOLE_DB; blackhole_hton->create= blackhole_create_handler; blackhole_hton->flags= HTON_CAN_RECREATE; + + VOID(pthread_mutex_init(&blackhole_mutex, MY_MUTEX_INIT_FAST)); + (void) hash_init(&blackhole_open_tables, system_charset_info,32,0,0, + (hash_get_key) blackhole_get_key, + (hash_free_key) 
blackhole_free_key, 0); + + return 0; +} + +static int blackhole_fini(void *p) +{ + hash_free(&blackhole_open_tables); + pthread_mutex_destroy(&blackhole_mutex); + return 0; } @@ -224,7 +331,7 @@ mysql_declare_plugin(blackhole) "/dev/null storage engine (anything you write to it disappears)", PLUGIN_LICENSE_GPL, blackhole_init, /* Plugin Init */ - NULL, /* Plugin Deinit */ + blackhole_fini, /* Plugin Deinit */ 0x0100 /* 1.0 */, NULL, /* status variables */ NULL, /* system variables */ diff --git a/storage/blackhole/ha_blackhole.h b/storage/blackhole/ha_blackhole.h index 5388dcfc187..d5a0d08926c 100644 --- a/storage/blackhole/ha_blackhole.h +++ b/storage/blackhole/ha_blackhole.h @@ -18,13 +18,24 @@ #endif /* + Shared structure for correct LOCK operation +*/ +struct st_blackhole_share { + THR_LOCK lock; + uint use_count; + uint table_name_length; + char table_name[1]; +}; + + +/* Class definition for the blackhole storage engine "Dumbest named feature ever" */ class ha_blackhole: public handler { THR_LOCK_DATA lock; /* MySQL lock */ - THR_LOCK thr_lock; + st_blackhole_share *share; public: ha_blackhole(handlerton *hton, TABLE_SHARE *table_arg); @@ -42,6 +53,7 @@ public: ulonglong table_flags() const { return(HA_NULL_IN_KEY | HA_CAN_FULLTEXT | HA_CAN_SQL_HANDLER | + HA_BINLOG_STMT_CAPABLE | HA_CAN_INDEX_BLOBS | HA_AUTO_PART_KEY | HA_FILE_BASED | HA_CAN_GEOMETRY | HA_CAN_INSERT_DELAYED); } @@ -60,23 +72,23 @@ public: uint max_supported_key_part_length() const { return BLACKHOLE_MAX_KEY_LENGTH; } int open(const char *name, int mode, uint test_if_locked); int close(void); - int write_row(byte * buf); + int write_row(uchar * buf); int rnd_init(bool scan); - int rnd_next(byte *buf); - int rnd_pos(byte * buf, byte *pos); - int index_read(byte * buf, const byte * key, - uint key_len, enum ha_rkey_function find_flag); - int index_read_idx(byte * buf, uint idx, const byte * key, - uint key_len, enum ha_rkey_function find_flag); - int index_read_last(byte * buf, const byte * key, uint key_len); - int index_next(byte * buf); - int index_prev(byte * buf); - int index_first(byte * buf); - int index_last(byte * buf); - void position(const byte *record); + int rnd_next(uchar *buf); + int rnd_pos(uchar * buf, uchar *pos); + int index_read_map(uchar * buf, const uchar * key, key_part_map keypart_map, + enum ha_rkey_function find_flag); + int index_read_idx_map(uchar * buf, uint idx, const uchar * key, + key_part_map keypart_map, + enum ha_rkey_function find_flag); + int index_read_last_map(uchar * buf, const uchar * key, key_part_map keypart_map); + int index_next(uchar * buf); + int index_prev(uchar * buf); + int index_first(uchar * buf); + int index_last(uchar * buf); + void position(const uchar *record); int info(uint flag); int external_lock(THD *thd, int lock_type); - uint lock_count(void) const; int create(const char *name, TABLE *table_arg, HA_CREATE_INFO *create_info); THR_LOCK_DATA **store_lock(THD *thd, diff --git a/storage/csv/CMakeLists.txt b/storage/csv/CMakeLists.txt index 359d1509a7e..bb0df45e5f4 100644 --- a/storage/csv/CMakeLists.txt +++ b/storage/csv/CMakeLists.txt @@ -19,4 +19,9 @@ SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX") INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include ${CMAKE_SOURCE_DIR}/sql ${CMAKE_SOURCE_DIR}/regex ${CMAKE_SOURCE_DIR}/extra/yassl/include) -ADD_LIBRARY(csv ha_tina.cc ha_tina.h transparent_file.cc transparent_file.h) + +SET(CSV_SOURCES ha_tina.cc ha_tina.h transparent_file.cc transparent_file.h) + +IF(NOT SOURCE_SUBLIBS) + ADD_LIBRARY(csv 
${CSV_SOURCES}) +ENDIF(NOT SOURCE_SUBLIBS) diff --git a/storage/csv/ha_tina.cc b/storage/csv/ha_tina.cc index afe8e5f1b27..9a7781e017d 100644 --- a/storage/csv/ha_tina.cc +++ b/storage/csv/ha_tina.cc @@ -46,10 +46,9 @@ TODO: #endif #include "mysql_priv.h" - +#include <mysql/plugin.h> #include "ha_tina.h" -#include <mysql/plugin.h> /* uchar + uchar + ulonglong + ulonglong + ulonglong + ulonglong + uchar @@ -69,6 +68,10 @@ static int free_share(TINA_SHARE *share); static int read_meta_file(File meta_file, ha_rows *rows); static int write_meta_file(File meta_file, ha_rows rows, bool dirty); +extern "C" void tina_get_status(void* param, int concurrent_insert); +extern "C" void tina_update_status(void* param); +extern "C" my_bool tina_check_status(void* param); + /* Stuff for shares */ pthread_mutex_t tina_mutex; static HASH tina_open_tables; @@ -93,11 +96,11 @@ int sort_set (tina_set *a, tina_set *b) return ( a->begin > b->begin ? 1 : ( a->begin < b->begin ? -1 : 0 ) ); } -static byte* tina_get_key(TINA_SHARE *share,uint *length, +static uchar* tina_get_key(TINA_SHARE *share, size_t *length, my_bool not_used __attribute__((unused))) { *length=share->table_name_length; - return (byte*) share->table_name; + return (uchar*) share->table_name; } static int tina_init_func(void *p) @@ -144,7 +147,7 @@ static TINA_SHARE *get_share(const char *table_name, TABLE *table) initialize its members. */ if (!(share=(TINA_SHARE*) hash_search(&tina_open_tables, - (byte*) table_name, + (uchar*) table_name, length))) { if (!my_multi_malloc(MYF(MY_WME | MY_ZEROFILL), @@ -164,6 +167,7 @@ static TINA_SHARE *get_share(const char *table_name, TABLE *table) share->rows_recorded= 0; share->update_file_opened= FALSE; share->tina_write_opened= FALSE; + share->data_file_version= 0; strmov(share->table_name, table_name); fn_format(share->data_file_name, table_name, "", CSV_EXT, MY_REPLACE_EXT|MY_UNPACK_FILENAME); @@ -174,7 +178,7 @@ static TINA_SHARE *get_share(const char *table_name, TABLE *table) goto error; share->saved_data_file_length= file_stat.st_size; - if (my_hash_insert(&tina_open_tables, (byte*) share)) + if (my_hash_insert(&tina_open_tables, (uchar*) share)) goto error; thr_lock_init(&share->lock); pthread_mutex_init(&share->mutex,MY_MUTEX_INIT_FAST); @@ -203,7 +207,7 @@ static TINA_SHARE *get_share(const char *table_name, TABLE *table) error: pthread_mutex_unlock(&tina_mutex); - my_free((gptr) share, MYF(0)); + my_free((uchar*) share, MYF(0)); return NULL; } @@ -236,7 +240,7 @@ static int read_meta_file(File meta_file, ha_rows *rows) DBUG_ENTER("ha_tina::read_meta_file"); VOID(my_seek(meta_file, 0, MY_SEEK_SET, MYF(0))); - if (my_read(meta_file, (byte*)meta_buffer, META_BUFFER_SIZE, 0) + if (my_read(meta_file, (uchar*)meta_buffer, META_BUFFER_SIZE, 0) != META_BUFFER_SIZE) DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); @@ -305,7 +309,7 @@ static int write_meta_file(File meta_file, ha_rows rows, bool dirty) *ptr= (uchar)dirty; VOID(my_seek(meta_file, 0, MY_SEEK_SET, MYF(0))); - if (my_write(meta_file, (byte *)meta_buffer, META_BUFFER_SIZE, 0) + if (my_write(meta_file, (uchar *)meta_buffer, META_BUFFER_SIZE, 0) != META_BUFFER_SIZE) DBUG_RETURN(-1); @@ -376,10 +380,10 @@ static int free_share(TINA_SHARE *share) share->tina_write_opened= FALSE; } - hash_delete(&tina_open_tables, (byte*) share); + hash_delete(&tina_open_tables, (uchar*) share); thr_lock_delete(&share->lock); pthread_mutex_destroy(&share->mutex); - my_free((gptr) share, MYF(0)); + my_free((uchar*) share, MYF(0)); } pthread_mutex_unlock(&tina_mutex); @@ -441,10 
+445,10 @@ ha_tina::ha_tina(handlerton *hton, TABLE_SHARE *table_arg) */ current_position(0), next_position(0), local_saved_data_file_length(0), file_buff(0), chain_alloced(0), chain_size(DEFAULT_CHAIN_LENGTH), - records_is_known(0) + local_data_file_version(0), records_is_known(0) { /* Set our original buffers from pre-allocated memory */ - buffer.set((char*)byte_buffer, IO_SIZE, system_charset_info); + buffer.set((char*)byte_buffer, IO_SIZE, &my_charset_bin); chain= chain_buffer; file_buff= new Transparent_file(); } @@ -454,7 +458,7 @@ ha_tina::ha_tina(handlerton *hton, TABLE_SHARE *table_arg) Encode a buffer into the quoted format. */ -int ha_tina::encode_quote(byte *buf) +int ha_tina::encode_quote(uchar *buf) { char attribute_buffer[1024]; String attribute(attribute_buffer, sizeof(attribute_buffer), @@ -558,7 +562,7 @@ int ha_tina::chain_append() if (chain_alloced) { /* Must cast since my_malloc unlike malloc doesn't have a void ptr */ - if ((chain= (tina_set *) my_realloc((gptr)chain, + if ((chain= (tina_set *) my_realloc((uchar*)chain, chain_size, MYF(MY_WME))) == NULL) return -1; } @@ -584,12 +588,13 @@ int ha_tina::chain_append() /* Scans for a row. */ -int ha_tina::find_current_row(byte *buf) +int ha_tina::find_current_row(uchar *buf) { off_t end_offset, curr_offset= current_position; int eoln_len; my_bitmap_map *org_bitmap; int error; + bool read_all; DBUG_ENTER("ha_tina::find_current_row"); /* @@ -601,6 +606,8 @@ int ha_tina::find_current_row(byte *buf) local_saved_data_file_length, &eoln_len)) == 0) DBUG_RETURN(HA_ERR_END_OF_FILE); + /* We must read all columns in case a table is opened for update */ + read_all= !bitmap_is_clear_all(table->write_set); /* Avoid asserts in ::store() for columns that are not going to be updated */ org_bitmap= dbug_tmp_use_all_columns(table, table->write_set); error= HA_ERR_CRASHED_ON_USAGE; @@ -609,37 +616,41 @@ int ha_tina::find_current_row(byte *buf) for (Field **field=table->field ; *field ; field++) { + char curr_char; + buffer.length(0); - if (curr_offset < end_offset && - file_buff->get_value(curr_offset) == '"') + if (curr_offset >= end_offset) + goto err; + curr_char= file_buff->get_value(curr_offset); + if (curr_char == '"') { curr_offset++; // Incrementpast the first quote - for(;curr_offset < end_offset; curr_offset++) + for(; curr_offset < end_offset; curr_offset++) { + curr_char= file_buff->get_value(curr_offset); // Need to convert line feeds! 
- if (file_buff->get_value(curr_offset) == '"' && - ((file_buff->get_value(curr_offset + 1) == ',') || - (curr_offset == end_offset -1 ))) + if (curr_char == '"' && + (curr_offset == end_offset - 1 || + file_buff->get_value(curr_offset + 1) == ',')) { curr_offset+= 2; // Move past the , and the " break; } - if (file_buff->get_value(curr_offset) == '\\' && - curr_offset != (end_offset - 1)) + if (curr_char == '\\' && curr_offset != (end_offset - 1)) { curr_offset++; - if (file_buff->get_value(curr_offset) == 'r') + curr_char= file_buff->get_value(curr_offset); + if (curr_char == 'r') buffer.append('\r'); - else if (file_buff->get_value(curr_offset) == 'n' ) + else if (curr_char == 'n' ) buffer.append('\n'); - else if ((file_buff->get_value(curr_offset) == '\\') || - (file_buff->get_value(curr_offset) == '"')) - buffer.append(file_buff->get_value(curr_offset)); + else if (curr_char == '\\' || curr_char == '"') + buffer.append(curr_char); else /* This could only happed with an externally created file */ { buffer.append('\\'); - buffer.append(file_buff->get_value(curr_offset)); + buffer.append(curr_char); } } else // ordinary symbol @@ -650,36 +661,30 @@ int ha_tina::find_current_row(byte *buf) */ if (curr_offset == end_offset - 1) goto err; - buffer.append(file_buff->get_value(curr_offset)); + buffer.append(curr_char); } } } - else if (my_isdigit(system_charset_info, - file_buff->get_value(curr_offset))) + else { - for(;curr_offset < end_offset; curr_offset++) + for(; curr_offset < end_offset; curr_offset++) { - if (file_buff->get_value(curr_offset) == ',') + curr_char= file_buff->get_value(curr_offset); + if (curr_char == ',') { - curr_offset+= 1; // Move past the , + curr_offset++; // Skip the , break; } - - if (my_isdigit(system_charset_info, file_buff->get_value(curr_offset))) - buffer.append(file_buff->get_value(curr_offset)); - else if (file_buff->get_value(curr_offset) == '.') - buffer.append(file_buff->get_value(curr_offset)); - else - goto err; + buffer.append(curr_char); } } - else + + if (read_all || bitmap_is_set(table->read_set, (*field)->field_index)) { - goto err; + if ((*field)->store(buffer.ptr(), buffer.length(), buffer.charset(), + CHECK_FIELD_WARN)) + goto err; } - - if (bitmap_is_set(table->read_set, (*field)->field_index)) - (*field)->store(buffer.ptr(), buffer.length(), system_charset_info); } next_position= end_offset + eoln_len; error= 0; @@ -785,17 +790,6 @@ void ha_tina::update_status() } -bool ha_tina::check_if_locking_is_allowed(uint sql_command, - ulong type, TABLE *table, - uint count, - bool called_by_privileged_thread) -{ - if (!called_by_privileged_thread) - return check_if_log_table_locking_is_allowed(sql_command, type, table); - - return TRUE; -} - /* Open a database file. Keep in mind that tables are caches, so this will not be called for every request. Any sort of positions @@ -814,6 +808,7 @@ int ha_tina::open(const char *name, int mode, uint open_options) DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); } + local_data_file_version= share->data_file_version; if ((data_file= my_open(share->data_file_name, O_RDONLY, MYF(0))) == -1) DBUG_RETURN(0); @@ -850,7 +845,7 @@ int ha_tina::close(void) of the file and appends the data. In an error case it really should just truncate to the original position (this is not done yet). 
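The rewritten loop above reads each byte once into curr_char and accepts four escape pairs inside a quoted field: \r, \n, \\ and \". The same unescaping rule, condensed into a standalone function over an in-memory string (my naming; the engine itself streams bytes through Transparent_file):

#include <string>

/*
  Unescape the body of one quoted CSV field the way the patched parser
  does: \r and \n become control characters, \\ and \" are unwrapped,
  and any other backslash pair is kept verbatim, since it can only
  come from an externally created file.
*/
std::string unescape_field(const std::string &raw)
{
  std::string out;
  for (size_t i= 0; i < raw.size(); i++)
  {
    char c= raw[i];
    if (c == '\\' && i + 1 < raw.size())
    {
      char next= raw[++i];
      if (next == 'r')
        out+= '\r';
      else if (next == 'n')
        out+= '\n';
      else if (next == '\\' || next == '"')
        out+= next;
      else
      {
        out+= '\\';
        out+= next;
      }
    }
    else
      out+= c;
  }
  return out;
}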
*/ -int ha_tina::write_row(byte * buf) +int ha_tina::write_row(uchar * buf) { int size; DBUG_ENTER("ha_tina::write_row"); @@ -870,7 +865,7 @@ int ha_tina::write_row(byte * buf) DBUG_RETURN(-1); /* use pwrite, as concurrent reader could have changed the position */ - if (my_write(share->tina_write_filedes, (byte*)buffer.ptr(), size, + if (my_write(share->tina_write_filedes, (uchar*)buffer.ptr(), size, MYF(MY_WME | MY_NABP))) DBUG_RETURN(-1); @@ -903,6 +898,7 @@ int ha_tina::open_update_temp_file_if_needed() 0, O_RDWR | O_TRUNC, MYF(MY_WME))) < 0) return 1; share->update_file_opened= TRUE; + temp_file_length= 0; } return 0; } @@ -915,7 +911,7 @@ int ha_tina::open_update_temp_file_if_needed() This will be called in a table scan right before the previous ::rnd_next() call. */ -int ha_tina::update_row(const byte * old_data, byte * new_data) +int ha_tina::update_row(const uchar * old_data, uchar * new_data) { int size; DBUG_ENTER("ha_tina::update_row"); @@ -927,15 +923,23 @@ int ha_tina::update_row(const byte * old_data, byte * new_data) size= encode_quote(new_data); + /* + During update we mark each updating record as deleted + (see the chain_append()) then write new one to the temporary data file. + At the end of the sequence in the rnd_end() we append all non-marked + records from the data file to the temporary data file then rename it. + The temp_file_length is used to calculate new data file length. + */ if (chain_append()) DBUG_RETURN(-1); if (open_update_temp_file_if_needed()) DBUG_RETURN(-1); - if (my_write(update_temp_file, (byte*)buffer.ptr(), size, + if (my_write(update_temp_file, (uchar*)buffer.ptr(), size, MYF(MY_WME | MY_NABP))) DBUG_RETURN(-1); + temp_file_length+= size; /* UPDATE should never happen on the log tables */ DBUG_ASSERT(!share->is_log_table); @@ -953,7 +957,7 @@ int ha_tina::update_row(const byte * old_data, byte * new_data) The table will then be deleted/positioned based on the ORDER (so RANDOM, DESC, ASC). */ -int ha_tina::delete_row(const byte * buf) +int ha_tina::delete_row(const uchar * buf) { DBUG_ENTER("ha_tina::delete_row"); ha_statistic_increment(&SSV::ha_delete_count); @@ -962,6 +966,11 @@ int ha_tina::delete_row(const byte * buf) DBUG_RETURN(-1); stats.records--; + /* Update shared info */ + DBUG_ASSERT(share->rows_recorded); + pthread_mutex_lock(&share->mutex); + share->rows_recorded--; + pthread_mutex_unlock(&share->mutex); /* DELETE should never happen on the log table */ DBUG_ASSERT(!share->is_log_table); @@ -970,6 +979,33 @@ int ha_tina::delete_row(const byte * buf) } +/** + @brief Initialize the data file. + + @details Compare the local version of the data file with the shared one. + If they differ, there are some changes behind and we have to reopen + the data file to make the changes visible. + Call @c file_buff->init_buff() at the end to read the beginning of the + data file into buffer. + + @retval 0 OK. + @retval 1 There was an error. +*/ + +int ha_tina::init_data_file() +{ + if (local_data_file_version != share->data_file_version) + { + local_data_file_version= share->data_file_version; + if (my_close(data_file, MYF(0)) || + (data_file= my_open(share->data_file_name, O_RDONLY, MYF(0))) == -1) + return 1; + } + file_buff->init_buff(data_file); + return 0; +} + + /* All table scans call this first. 
The order of a table scan is: @@ -1006,9 +1042,8 @@ int ha_tina::rnd_init(bool scan) DBUG_ENTER("ha_tina::rnd_init"); /* set buffer to the beginning of the file */ - file_buff->init_buff(data_file); - if (share->crashed) - DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); + if (share->crashed || init_data_file()) + DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); current_position= next_position= 0; stats.records= 0; @@ -1032,7 +1067,7 @@ int ha_tina::rnd_init(bool scan) NULL and "". This is ok since this table handler is for spreadsheets and they don't know about them either :) */ -int ha_tina::rnd_next(byte *buf) +int ha_tina::rnd_next(uchar *buf) { int rc; DBUG_ENTER("ha_tina::rnd_next"); @@ -1064,7 +1099,7 @@ int ha_tina::rnd_next(byte *buf) its just a position. Look at the bdb code if you want to see a case where something other then a number is stored. */ -void ha_tina::position(const byte *record) +void ha_tina::position(const uchar *record) { DBUG_ENTER("ha_tina::position"); my_store_ptr(ref, ref_length, current_position); @@ -1077,7 +1112,7 @@ void ha_tina::position(const byte *record) my_get_ptr() retrieves the data for you. */ -int ha_tina::rnd_pos(byte * buf, byte *pos) +int ha_tina::rnd_pos(uchar * buf, uchar *pos) { DBUG_ENTER("ha_tina::rnd_pos"); ha_statistic_increment(&SSV::ha_read_rnd_next_count); @@ -1173,15 +1208,18 @@ int ha_tina::rnd_end() while ((file_buffer_start != -1)) // while not end of file { bool in_hole= get_write_pos(&write_end, ptr); + off_t write_length= write_end - write_begin; /* if there is something to write, write it */ - if ((write_end - write_begin) && - (my_write(update_temp_file, - (byte*)(file_buff->ptr() + - (write_begin - file_buff->start())), - write_end - write_begin, MYF_RW))) - goto error; - + if (write_length) + { + if (my_write(update_temp_file, + (uchar*) (file_buff->ptr() + + (write_begin - file_buff->start())), + write_length, MYF_RW)) + goto error; + temp_file_length+= write_length; + } if (in_hole) { /* skip hole */ @@ -1229,11 +1267,26 @@ int ha_tina::rnd_end() if (((data_file= my_open(share->data_file_name, O_RDONLY, MYF(0))) == -1)) DBUG_RETURN(-1); /* + As we reopened the data file, increase share->data_file_version + in order to force other threads waiting on a table lock and + have already opened the table to reopen the data file. + That makes the latest changes become visible to them. + Update local_data_file_version as no need to reopen it in the + current thread. + */ + share->data_file_version++; + local_data_file_version= share->data_file_version; + /* The datafile is consistent at this point and the write filedes is closed, so nothing worrying will happen to it in case of a crash. Here we record this fact to the meta-file. */ (void)write_meta_file(share->meta_file, share->rows_recorded, FALSE); + /* + Update local_saved_data_file_length with the real length of the + data file. 
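The rnd_end() changes above complete the update protocol documented earlier in this hunk: every byte range not covered by a hole (a row that update_row() or delete_row() marked dead via chain_append()) is copied into the temporary file, temp_file_length accumulates the resulting size, and share->data_file_version is incremented so other handlers notice the rewrite. The copy step itself, reduced to an in-memory sketch with simplified hole bookkeeping and invented names:

#include <algorithm>
#include <string>
#include <vector>

/* Byte range of a dead row; assumed non-overlapping and in bounds. */
struct Hole { size_t begin, end; };

/*
  Copy everything outside the holes, in file order: the same
  compaction rnd_end() performs into the temporary data file.
*/
std::string compact(const std::string &data, std::vector<Hole> holes)
{
  std::sort(holes.begin(), holes.end(),
            [](const Hole &a, const Hole &b) { return a.begin < b.begin; });
  std::string out;
  size_t pos= 0;
  for (const Hole &h : holes)
  {
    out.append(data, pos, h.begin - pos);    /* live bytes before hole */
    pos= h.end;                              /* skip the dead row */
  }
  out+= data.substr(pos);                    /* tail after last hole */
  return out;
}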
+ */ + local_saved_data_file_length= temp_file_length; } DBUG_RETURN(0); @@ -1265,7 +1318,7 @@ error: int ha_tina::repair(THD* thd, HA_CHECK_OPT* check_opt) { char repaired_fname[FN_REFLEN]; - byte *buf; + uchar *buf; File repair_file; int rc; ha_rows rows_repaired= 0; @@ -1281,11 +1334,12 @@ int ha_tina::repair(THD* thd, HA_CHECK_OPT* check_opt) /* Don't assert in field::val() functions */ table->use_all_columns(); - if (!(buf= (byte*) my_malloc(table->s->reclength, MYF(MY_WME)))) + if (!(buf= (uchar*) my_malloc(table->s->reclength, MYF(MY_WME)))) DBUG_RETURN(HA_ERR_OUT_OF_MEM); /* position buffer to the start of the file */ - file_buff->init_buff(data_file); + if (init_data_file()) + DBUG_RETURN(HA_ERR_CRASHED_ON_REPAIR); /* Local_saved_data_file_length is initialized during the lock phase. @@ -1299,6 +1353,7 @@ int ha_tina::repair(THD* thd, HA_CHECK_OPT* check_opt) /* Read the file row-by-row. If everything is ok, repair is not needed. */ while (!(rc= find_current_row(buf))) { + thd_inc_row_count(thd); rows_repaired++; current_position= next_position; } @@ -1337,7 +1392,7 @@ int ha_tina::repair(THD* thd, HA_CHECK_OPT* check_opt) { write_end= min(file_buff->end(), current_position); if ((write_end - write_begin) && - (my_write(repair_file, (byte*)file_buff->ptr(), + (my_write(repair_file, (uchar*)file_buff->ptr(), write_end - write_begin, MYF_RW))) DBUG_RETURN(-1); @@ -1391,6 +1446,11 @@ int ha_tina::delete_all_rows() rc= my_chsize(share->tina_write_filedes, 0, 0, MYF(MY_WME)); stats.records=0; + /* Update shared info */ + pthread_mutex_lock(&share->mutex); + share->rows_recorded= 0; + pthread_mutex_unlock(&share->mutex); + local_saved_data_file_length= 0; DBUG_RETURN(rc); } @@ -1441,17 +1501,18 @@ int ha_tina::create(const char *name, TABLE *table_arg, int ha_tina::check(THD* thd, HA_CHECK_OPT* check_opt) { int rc= 0; - byte *buf; + uchar *buf; const char *old_proc_info; ha_rows count= share->rows_recorded; DBUG_ENTER("ha_tina::check"); old_proc_info= thd_proc_info(thd, "Checking table"); - if (!(buf= (byte*) my_malloc(table->s->reclength, MYF(MY_WME)))) + if (!(buf= (uchar*) my_malloc(table->s->reclength, MYF(MY_WME)))) DBUG_RETURN(HA_ERR_OUT_OF_MEM); /* position buffer to the start of the file */ - file_buff->init_buff(data_file); + if (init_data_file()) + DBUG_RETURN(HA_ERR_CRASHED); /* Local_saved_data_file_length is initialized during the lock phase. @@ -1464,6 +1525,7 @@ int ha_tina::check(THD* thd, HA_CHECK_OPT* check_opt) /* Read the file row-by-row. If everything is ok, repair is not needed. 
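The row-by-row scan follows in the next hunk; before it runs, check() here, like repair() and rnd_init() above, now starts with init_data_file(), the reader side of the versioning scheme: compare the handler-local version against the shared one and reopen the data file if another thread has rewritten it. The core of that check, sketched with plain stdio in place of mysys file functions:

#include <cstdio>

/* Simplified stand-ins for the shared and per-handler CSV state. */
struct TinaShare
{
  const char *data_file_name;
  unsigned data_file_version;    /* bumped by writers after a rewrite */
};

struct TinaHandler
{
  TinaShare *share;
  unsigned local_data_file_version;
  FILE *data_file;

  /* Reopen the data file iff the shared version moved past ours. */
  int init_data_file()
  {
    if (local_data_file_version != share->data_file_version)
    {
      local_data_file_version= share->data_file_version;
      if (fclose(data_file) != 0 ||
          !(data_file= fopen(share->data_file_name, "rb")))
        return 1;
    }
    return 0;
  }
};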
*/ while (!(rc= find_current_row(buf))) { + thd_inc_row_count(thd); count--; current_position= next_position; } diff --git a/storage/csv/ha_tina.h b/storage/csv/ha_tina.h index 0c667237c0f..5ce09783b9b 100644 --- a/storage/csv/ha_tina.h +++ b/storage/csv/ha_tina.h @@ -49,6 +49,7 @@ typedef struct st_tina_share { File tina_write_filedes; /* File handler for readers */ bool crashed; /* Meta file is crashed */ ha_rows rows_recorded; /* Number of rows in tables */ + uint data_file_version; /* Version of the data file used */ } TINA_SHARE; struct tina_set { @@ -63,7 +64,8 @@ class ha_tina: public handler off_t current_position; /* Current position in the file during a file scan */ off_t next_position; /* Next position in the file scan */ off_t local_saved_data_file_length; /* save position for reads */ - byte byte_buffer[IO_SIZE]; + off_t temp_file_length; + uchar byte_buffer[IO_SIZE]; Transparent_file *file_buff; File data_file; /* File handler for readers */ File update_temp_file; @@ -76,21 +78,23 @@ class ha_tina: public handler tina_set chain_buffer[DEFAULT_CHAIN_LENGTH]; tina_set *chain; tina_set *chain_ptr; - byte chain_alloced; + uchar chain_alloced; uint32 chain_size; + uint local_data_file_version; /* Saved version of the data file used */ bool records_is_known; private: bool get_write_pos(off_t *end_pos, tina_set *closest_hole); int open_update_temp_file_if_needed(); int init_tina_writer(); + int init_data_file(); public: ha_tina(handlerton *hton, TABLE_SHARE *table_arg); ~ha_tina() { if (chain_alloced) - my_free((gptr)chain, 0); + my_free(chain, 0); if (file_buff) delete file_buff; } @@ -99,7 +103,8 @@ public: const char **bas_ext() const; ulonglong table_flags() const { - return (HA_NO_TRANSACTIONS | HA_REC_NOT_IN_SEQ | HA_NO_AUTO_INCREMENT); + return (HA_NO_TRANSACTIONS | HA_REC_NOT_IN_SEQ | HA_NO_AUTO_INCREMENT | + HA_BINLOG_ROW_CAPABLE | HA_BINLOG_STMT_CAPABLE); } ulong index_flags(uint idx, uint part, bool all_parts) const { @@ -126,18 +131,14 @@ public: */ ha_rows estimate_rows_upper_bound() { return HA_POS_ERROR; } - virtual bool check_if_locking_is_allowed(uint sql_command, - ulong type, TABLE *table, - uint count, - bool called_by_logger_thread); int open(const char *name, int mode, uint open_options); int close(void); - int write_row(byte * buf); - int update_row(const byte * old_data, byte * new_data); - int delete_row(const byte * buf); + int write_row(uchar * buf); + int update_row(const uchar * old_data, uchar * new_data); + int delete_row(const uchar * buf); int rnd_init(bool scan=1); - int rnd_next(byte *buf); - int rnd_pos(byte * buf, byte *pos); + int rnd_next(uchar *buf); + int rnd_pos(uchar * buf, uchar *pos); bool check_and_repair(THD *thd); int check(THD* thd, HA_CHECK_OPT* check_opt); bool is_crashed() const; @@ -145,7 +146,7 @@ public: int repair(THD* thd, HA_CHECK_OPT* check_opt); /* This is required for SQL layer to know that we support autorepair */ bool auto_repair() const { return 1; } - void position(const byte *record); + void position(const uchar *record); int info(uint); int extra(enum ha_extra_function operation); int delete_all_rows(void); @@ -164,8 +165,8 @@ public: void update_status(); /* The following methods were added just for TINA */ - int encode_quote(byte *buf); - int find_current_row(byte *buf); + int encode_quote(uchar *buf); + int find_current_row(uchar *buf); int chain_append(); }; diff --git a/storage/csv/transparent_file.cc b/storage/csv/transparent_file.cc index 27cc8c024b4..a200fa6ac36 100644 --- a/storage/csv/transparent_file.cc 
+++ b/storage/csv/transparent_file.cc @@ -22,12 +22,12 @@ Transparent_file::Transparent_file() : lower_bound(0), buff_size(IO_SIZE) { - buff= (byte *) my_malloc(buff_size*sizeof(byte), MYF(MY_WME)); + buff= (uchar *) my_malloc(buff_size*sizeof(uchar), MYF(MY_WME)); } Transparent_file::~Transparent_file() { - my_free((gptr)buff, MYF(MY_ALLOW_ZERO_PTR)); + my_free((uchar*)buff, MYF(MY_ALLOW_ZERO_PTR)); } void Transparent_file::init_buff(File filedes_arg) @@ -40,7 +40,7 @@ void Transparent_file::init_buff(File filedes_arg) upper_bound= my_read(filedes, buff, buff_size, MYF(0)); } -byte *Transparent_file::ptr() +uchar *Transparent_file::ptr() { return buff; } @@ -57,18 +57,18 @@ off_t Transparent_file::end() off_t Transparent_file::read_next() { - off_t bytes_read; + size_t bytes_read; /* No need to seek here, as the file managed by Transparent_file class always points to upper_bound byte */ if ((bytes_read= my_read(filedes, buff, buff_size, MYF(0))) == MY_FILE_ERROR) - return -1; + return (off_t) -1; /* end of file */ if (!bytes_read) - return -1; + return (off_t) -1; lower_bound= upper_bound; upper_bound+= bytes_read; @@ -79,26 +79,24 @@ off_t Transparent_file::read_next() char Transparent_file::get_value(off_t offset) { - off_t bytes_read; + size_t bytes_read; /* check boundaries */ if ((lower_bound <= offset) && (offset < upper_bound)) return buff[offset - lower_bound]; - else - { - VOID(my_seek(filedes, offset, MY_SEEK_SET, MYF(0))); - /* read appropriate portion of the file */ - if ((bytes_read= my_read(filedes, buff, buff_size, - MYF(0))) == MY_FILE_ERROR) - return 0; - - lower_bound= offset; - upper_bound= lower_bound + bytes_read; - - /* end of file */ - if (upper_bound == offset) - return 0; - - return buff[0]; - } + + VOID(my_seek(filedes, offset, MY_SEEK_SET, MYF(0))); + /* read appropriate portion of the file */ + if ((bytes_read= my_read(filedes, buff, buff_size, + MYF(0))) == MY_FILE_ERROR) + return 0; + + lower_bound= offset; + upper_bound= lower_bound + bytes_read; + + /* end of file */ + if (upper_bound == offset) + return 0; + + return buff[0]; } diff --git a/storage/csv/transparent_file.h b/storage/csv/transparent_file.h index ceb59ec7caf..4c0f4cce7e7 100644 --- a/storage/csv/transparent_file.h +++ b/storage/csv/transparent_file.h @@ -21,7 +21,7 @@ class Transparent_file { File filedes; - byte *buff; /* in-memory window to the file or mmaped area */ + uchar *buff; /* in-memory window to the file or mmaped area */ /* current window sizes */ off_t lower_bound; off_t upper_bound; @@ -33,7 +33,7 @@ public: ~Transparent_file(); void init_buff(File filedes_arg); - byte *ptr(); + uchar *ptr(); off_t start(); off_t end(); char get_value (off_t offset); diff --git a/storage/example/CMakeLists.txt b/storage/example/CMakeLists.txt index f4e8639ef25..99c223f3f85 100644 --- a/storage/example/CMakeLists.txt +++ b/storage/example/CMakeLists.txt @@ -19,4 +19,9 @@ SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX") INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include ${CMAKE_SOURCE_DIR}/sql ${CMAKE_SOURCE_DIR}/regex ${CMAKE_SOURCE_DIR}/extra/yassl/include) -ADD_LIBRARY(example ha_example.cc) + +SET(EXAMPLE_SOURCES ha_example.cc) + +IF(NOT SOURCE_SUBLIBS) + ADD_LIBRARY(example ${EXAMPLE_SOURCES}) +ENDIF(NOT SOURCE_SUBLIBS) diff --git a/storage/example/ha_example.cc b/storage/example/ha_example.cc index 12ca91f0a6f..65e28da1fe3 100644 --- a/storage/example/ha_example.cc +++ b/storage/example/ha_example.cc @@ -13,39 +13,44 @@ along with this program; if not, write to the 
Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -/** @file ha_example.cc +/** + @file ha_example.cc - @brief + @brief The ha_example engine is a stubbed storage engine for example purposes only; it does nothing at this point. Its purpose is to provide a source code illustration of how to begin writing new storage engines; see also /storage/example/ha_example.h. - @details - ha_example will let you create/open/delete tables, but nothing further - (for example, indexes are not supported nor can data be stored in the - table). Use this example as a template for implementing the same functionality - in your own storage engine. You can enable the example storage engine in - your build by doing the following during your build process:<br> - ./configure --with-example-storage-engine + @details + ha_example will let you create/open/delete tables, but + nothing further (for example, indexes are not supported nor can data + be stored in the table). Use this example as a template for + implementing the same functionality in your own storage engine. You + can enable the example storage engine in your build by doing the + following during your build process:<br> ./configure + --with-example-storage-engine Once this is done, MySQL will let you create tables with:<br> CREATE TABLE <table name> (...) ENGINE=EXAMPLE; - The example storage engine is set up to use table locks. It implements an - example "SHARE" that is inserted into a hash by table name. You can use this - to store information of state that any example handler object will be able to - see when it is using that table. + The example storage engine is set up to use table locks. It + implements an example "SHARE" that is inserted into a hash by table + name. You can use this to store information of state that any + example handler object will be able to see when it is using that + table. Please read the object definition in ha_example.h before reading the rest of this file. - @note - When you create an EXAMPLE table, the MySQL Server creates a table .frm (format) - file in the database directory, using the table name as the file name as is - customary with MySQL. No other files are created. To get an idea of what occurs, - here is an example select that would do a scan of an entire table: - @code + @note + When you create an EXAMPLE table, the MySQL Server creates a table .frm + (format) file in the database directory, using the table name as the file + name as is customary with MySQL. No other files are created. To get an idea + of what occurs, here is an example select that would do a scan of an entire + table: + + @code ha_example::store_lock ha_example::external_lock ha_example::info @@ -66,13 +71,13 @@ ha_example::external_lock ha_example::extra ENUM HA_EXTRA_RESET Reset database to after open - @endcode + @endcode - Here you see that the example storage engine has 9 rows called before rnd_next - signals that it has reached the end of its data. Also note that the table in - question was already opened; had it not been open, a call to ha_example::open() - would also have been necessary. Calls to ha_example::extra() are hints as to - what will be occuring to the request. + Here you see that the example storage engine has 9 rows called before + rnd_next signals that it has reached the end of its data. Also note that + the table in question was already opened; had it not been open, a call to + ha_example::open() would also have been necessary. 
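The call trace quoted above is driven by a simple protocol on the SQL layer's side. A toy driver over a hypothetical three-method slice of the handler interface makes the rnd_init/rnd_next/rnd_end ordering explicit; the HA_ERR_END_OF_FILE value is taken from my_base.h, everything else is invented for illustration:

static const int HA_ERR_END_OF_FILE= 137;   /* as defined in my_base.h */

/* Hypothetical minimal slice of the handler scan interface. */
struct Handler
{
  virtual int rnd_init(bool scan)= 0;
  virtual int rnd_next(unsigned char *buf)= 0;
  virtual int rnd_end()= 0;
  virtual ~Handler() {}
};

/* Full table scan: init once, fetch until end of file, clean up. */
int scan_all(Handler &h, unsigned char *record)
{
  int rc= h.rnd_init(true);
  if (rc)
    return rc;
  while ((rc= h.rnd_next(record)) == 0)
  {
    /* one row is now in `record`; a real caller would evaluate it */
  }
  h.rnd_end();
  return rc == HA_ERR_END_OF_FILE ? 0 : rc;
}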
Calls to + ha_example::extra() are hints as to what will be occuring to the request. Happy coding!<br> -Brian @@ -90,25 +95,33 @@ static handler *example_create_handler(handlerton *hton, TABLE_SHARE *table, MEM_ROOT *mem_root); -static int example_init_func(); handlerton *example_hton; /* Variables for example share methods */ -static HASH example_open_tables; ///< Hash used to track the number of open tables; variable for example share methods -pthread_mutex_t example_mutex; ///< This is the mutex used to init the hash; variable for example share methods -static int example_init= 0; ///< This variable is used to check the init state of hash; variable for example share methods -/** @brief +/* + Hash used to track the number of open tables; variable for example share + methods +*/ +static HASH example_open_tables; + +/* The mutex used to init the hash; variable for example share methods */ +pthread_mutex_t example_mutex; + +/** + @brief Function we use in the creation of our hash to get key. */ -static byte* example_get_key(EXAMPLE_SHARE *share,uint *length, + +static uchar* example_get_key(EXAMPLE_SHARE *share, size_t *length, my_bool not_used __attribute__((unused))) { *length=share->table_name_length; - return (byte*) share->table_name; + return (uchar*) share->table_name; } + static int example_init_func(void *p) { DBUG_ENTER("example_init_func"); @@ -126,6 +139,7 @@ static int example_init_func(void *p) DBUG_RETURN(0); } + static int example_done_func(void *p) { int error= 0; @@ -139,11 +153,15 @@ static int example_done_func(void *p) DBUG_RETURN(0); } -/** @brief - Example of simple lock controls. The "share" it creates is a structure we will - pass to each example handler. Do you have to have one of these? Well, you have - pieces that are used for locking, and they are needed to function. + +/** + @brief + Example of simple lock controls. The "share" it creates is a + structure we will pass to each example handler. Do you have to have + one of these? Well, you have pieces that are used for locking, and + they are needed to function. */ + static EXAMPLE_SHARE *get_share(const char *table_name, TABLE *table) { EXAMPLE_SHARE *share; @@ -154,7 +172,7 @@ static EXAMPLE_SHARE *get_share(const char *table_name, TABLE *table) length=(uint) strlen(table_name); if (!(share=(EXAMPLE_SHARE*) hash_search(&example_open_tables, - (byte*) table_name, + (uchar*) table_name, length))) { if (!(share=(EXAMPLE_SHARE *) @@ -171,7 +189,7 @@ static EXAMPLE_SHARE *get_share(const char *table_name, TABLE *table) share->table_name_length=length; share->table_name=tmp_name; strmov(share->table_name,table_name); - if (my_hash_insert(&example_open_tables, (byte*) share)) + if (my_hash_insert(&example_open_tables, (uchar*) share)) goto error; thr_lock_init(&share->lock); pthread_mutex_init(&share->mutex,MY_MUTEX_INIT_FAST); @@ -183,24 +201,27 @@ static EXAMPLE_SHARE *get_share(const char *table_name, TABLE *table) error: pthread_mutex_destroy(&share->mutex); - my_free((gptr) share, MYF(0)); + my_free(share, MYF(0)); return NULL; } -/** @brief + +/** + @brief Free lock controls. We call this whenever we close a table. If the table had the last reference to the share, then we free memory associated with it. 
*/ + static int free_share(EXAMPLE_SHARE *share) { pthread_mutex_lock(&example_mutex); if (!--share->use_count) { - hash_delete(&example_open_tables, (byte*) share); + hash_delete(&example_open_tables, (uchar*) share); thr_lock_delete(&share->lock); pthread_mutex_destroy(&share->mutex); - my_free((gptr) share, MYF(0)); + my_free(share, MYF(0)); } pthread_mutex_unlock(&example_mutex); @@ -218,15 +239,25 @@ ha_example::ha_example(handlerton *hton, TABLE_SHARE *table_arg) :handler(hton, table_arg) {} -/** @brief - If frm_error() is called then we will use this to determine the file extensions - that exist for the storage engine. This is also used by the default rename_table - and delete_table method in handler.cc. - @see +/** + @brief + If frm_error() is called then we will use this to determine + the file extensions that exist for the storage engine. This is also + used by the default rename_table and delete_table method in + handler.cc. + + For engines that have two file name extentions (separate meta/index file + and data file), the order of elements is relevant. First element of engine + file name extentions array should be meta/index file extention. Second + element - data file extention. This order is assumed by + prepare_for_repair() when REPAIR TABLE ... USE_FRM is issued. + + @see rename_table method in handler.cc and delete_table method in handler.cc */ + static const char *ha_example_exts[] = { NullS }; @@ -236,10 +267,12 @@ const char **ha_example::bas_ext() const return ha_example_exts; } -/** @brief + +/** + @brief Used for opening tables. The name will be the name of the file. - @details + @details A table is opened when it needs to be opened; e.g. when a request comes in for a SELECT on the table (tables are not open and closed for each request, they are cached). @@ -247,9 +280,10 @@ const char **ha_example::bas_ext() const Called from handler.cc by handler::ha_open(). The server opens all tables by calling ha_open() which then calls the handler specific open(). - @see + @see handler::ha_open() in handler.cc */ + int ha_example::open(const char *name, int mode, uint test_if_locked) { DBUG_ENTER("ha_example::open"); @@ -261,27 +295,32 @@ int ha_example::open(const char *name, int mode, uint test_if_locked) DBUG_RETURN(0); } -/** @brief + +/** + @brief Closes a table. We call the free_share() function to free any resources that we have allocated in the "shared" structure. - @details + @details Called from sql_base.cc, sql_select.cc, and table.cc. In sql_select.cc it is - only used to close up temporary tables or during the process where a temporary - table is converted over to being a myisam table. + only used to close up temporary tables or during the process where a + temporary table is converted over to being a myisam table. For sql_base.cc look at close_data_tables(). - @see + @see sql_base.cc, sql_select.cc and table.cc */ + int ha_example::close(void) { DBUG_ENTER("ha_example::close"); DBUG_RETURN(free_share(share)); } -/** @brief + +/** + @brief write_row() inserts a row. No extra() hint is given currently if a bulk load is happening. buf() is a byte array of data. You can use the field information to extract the data from the native byte array type. 
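write_row(), documented above, hands the engine the row as a packed byte array whose layout is described by the table's Field objects. As a rough standalone analogue, decoding such a buffer might look like the sketch below; the descriptor format is invented for illustration and is not MySQL's actual record layout:

#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

/* Invented field descriptor; a real engine walks table->field[]. */
enum FieldType { FIELD_INT32, FIELD_VARCHAR };
struct FieldDef { FieldType type; size_t offset; };

/* Decode a packed record buffer field by field. */
void dump_record(const unsigned char *buf, const std::vector<FieldDef> &defs)
{
  for (const FieldDef &f : defs)
  {
    if (f.type == FIELD_INT32)
    {
      int32_t v;
      std::memcpy(&v, buf + f.offset, sizeof v);   /* fixed-width field */
      std::printf("%d\n", (int) v);
    }
    else
    {
      int len= buf[f.offset];                      /* 1-byte length prefix */
      std::printf("%.*s\n", len, (const char*) (buf + f.offset + 1));
    }
  }
}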
@@ -309,13 +348,16 @@ int ha_example::close(void) item_sum.cc, item_sum.cc, sql_acl.cc, sql_insert.cc, sql_insert.cc, sql_select.cc, sql_table.cc, sql_udf.cc and sql_update.cc */ -int ha_example::write_row(byte * buf) + +int ha_example::write_row(uchar *buf) { DBUG_ENTER("ha_example::write_row"); DBUG_RETURN(HA_ERR_WRONG_COMMAND); } -/** @brief + +/** + @brief Yes, update_row() does what you expect, it updates a row. old_data will have the previous row record in it, while new_data will have the newest data in it. Keep in mind that the server can do updates based on ordering if an ORDER BY @@ -336,69 +378,84 @@ int ha_example::write_row(byte * buf) @see sql_select.cc, sql_acl.cc, sql_update.cc and sql_insert.cc */ -int ha_example::update_row(const byte * old_data, byte * new_data) +int ha_example::update_row(const uchar *old_data, uchar *new_data) { DBUG_ENTER("ha_example::update_row"); DBUG_RETURN(HA_ERR_WRONG_COMMAND); } -/** @brief + +/** + @brief This will delete a row. buf will contain a copy of the row to be deleted. The server will call this right after the current row has been called (from either a previous rnd_nexT() or index call). - @details + @details If you keep a pointer to the last row or can access a primary key it will make doing the deletion quite a bit easier. Keep in mind that the server does not guarantee consecutive deletions. ORDER BY clauses can be used. - Called in sql_acl.cc and sql_udf.cc to manage internal table information. - Called in sql_delete.cc, sql_insert.cc, and sql_select.cc. In sql_select it is - used for removing duplicates while in insert it is used for REPLACE calls. + Called in sql_acl.cc and sql_udf.cc to manage internal table + information. Called in sql_delete.cc, sql_insert.cc, and + sql_select.cc. In sql_select it is used for removing duplicates + while in insert it is used for REPLACE calls. - @see + @see sql_acl.cc, sql_udf.cc, sql_delete.cc, sql_insert.cc and sql_select.cc */ -int ha_example::delete_row(const byte * buf) + +int ha_example::delete_row(const uchar *buf) { DBUG_ENTER("ha_example::delete_row"); DBUG_RETURN(HA_ERR_WRONG_COMMAND); } -/** @brief + +/** + @brief Positions an index cursor to the index specified in the handle. Fetches the row if available. If the key value is null, begin at the first key of the index. */ -int ha_example::index_read(byte * buf, const byte * key, - uint key_len __attribute__((unused)), - enum ha_rkey_function find_flag - __attribute__((unused))) + +int ha_example::index_read_map(uchar *buf, const uchar *key, + key_part_map keypart_map __attribute__((unused)), + enum ha_rkey_function find_flag + __attribute__((unused))) { DBUG_ENTER("ha_example::index_read"); DBUG_RETURN(HA_ERR_WRONG_COMMAND); } -/** @brief + +/** + @brief Used to read forward through the index. */ -int ha_example::index_next(byte * buf) + +int ha_example::index_next(uchar *buf) { DBUG_ENTER("ha_example::index_next"); DBUG_RETURN(HA_ERR_WRONG_COMMAND); } -/** @brief + +/** + @brief Used to read backwards through the index. */ -int ha_example::index_prev(byte * buf) + +int ha_example::index_prev(uchar *buf) { DBUG_ENTER("ha_example::index_prev"); DBUG_RETURN(HA_ERR_WRONG_COMMAND); } -/** @brief + +/** + @brief index_first() asks for the first key in the index. 
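The index_read_map() hunk above reflects the 5.1 handler API change from a (key, key_len) pair to a (key, keypart_map) pair, where the map is a bitmask with one bit per key part present in the search key. For the usual prefix-shaped maps (1, 3, 7, ...), the number of key parts supplied is simply the count of trailing one-bits; key_part_map is modelled here as a 64-bit integer purely for illustration:

#include <cstdint>

typedef uint64_t key_part_map;      /* bit n set => key part n supplied */

/* Count the key parts covered by a prefix-shaped map such as 0b0111. */
unsigned keyparts_used(key_part_map map)
{
  unsigned n= 0;
  while (map & 1)
  {
    n++;
    map>>= 1;
  }
  return n;
}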
@details @@ -407,13 +464,15 @@ int ha_example::index_prev(byte * buf) @see opt_range.cc, opt_sum.cc, sql_handler.cc and sql_select.cc */ -int ha_example::index_first(byte * buf) +int ha_example::index_first(uchar *buf) { DBUG_ENTER("ha_example::index_first"); DBUG_RETURN(HA_ERR_WRONG_COMMAND); } -/** @brief + +/** + @brief index_last() asks for the last key in the index. @details @@ -422,13 +481,15 @@ int ha_example::index_first(byte * buf) @see opt_range.cc, opt_sum.cc, sql_handler.cc and sql_select.cc */ -int ha_example::index_last(byte * buf) +int ha_example::index_last(uchar *buf) { DBUG_ENTER("ha_example::index_last"); DBUG_RETURN(HA_ERR_WRONG_COMMAND); } -/** @brief + +/** + @brief rnd_init() is called when the system wants the storage engine to do a table scan. See the example in the introduction at the top of this file to see when rnd_init() is called. @@ -452,7 +513,9 @@ int ha_example::rnd_end() DBUG_RETURN(0); } -/** @brief + +/** + @brief This is called for each row of the table scan. When you run out of records you should return HA_ERR_END_OF_FILE. Fill buff up with the row information. The Field structure for the table is the key to getting data into buf @@ -465,13 +528,15 @@ int ha_example::rnd_end() @see filesort.cc, records.cc, sql_handler.cc, sql_select.cc, sql_table.cc and sql_update.cc */ -int ha_example::rnd_next(byte *buf) +int ha_example::rnd_next(uchar *buf) { DBUG_ENTER("ha_example::rnd_next"); DBUG_RETURN(HA_ERR_END_OF_FILE); } -/** @brief + +/** + @brief position() is called after each call to rnd_next() if the data needs to be ordered. You can do something like the following to store the position: @@ -491,13 +556,15 @@ int ha_example::rnd_next(byte *buf) @see filesort.cc, sql_select.cc, sql_delete.cc and sql_update.cc */ -void ha_example::position(const byte *record) +void ha_example::position(const uchar *record) { DBUG_ENTER("ha_example::position"); DBUG_VOID_RETURN; } -/** @brief + +/** + @brief This is like rnd_next, but you are given a position to use to determine the row. The position will be of the type that you stored in ref. You can use ha_get_ptr(pos,ref_length) to retrieve whatever key @@ -509,13 +576,15 @@ void ha_example::position(const byte *record) @see filesort.cc, records.cc, sql_insert.cc, sql_select.cc and sql_update.cc */ -int ha_example::rnd_pos(byte * buf, byte *pos) +int ha_example::rnd_pos(uchar *buf, uchar *pos) { DBUG_ENTER("ha_example::rnd_pos"); DBUG_RETURN(HA_ERR_WRONG_COMMAND); } -/** @brief + +/** + @brief ::info() is used to return information to the optimizer. See my_base.h for the complete description. @@ -558,7 +627,9 @@ int ha_example::info(uint flag) DBUG_RETURN(0); } -/** @brief + +/** + @brief extra() is called whenever the server wishes to send a hint to the storage engine. The myisam engine implements the most hints. ha_innodb.cc has the most exhaustive list of these hints. @@ -572,7 +643,9 @@ int ha_example::extra(enum ha_extra_function operation) DBUG_RETURN(0); } -/** @brief + +/** + @brief Used to delete all rows in a table, including cases of truncate and cases where the optimizer realizes that all rows will be removed as a result of an SQL statement. @@ -596,7 +669,9 @@ int ha_example::delete_all_rows() DBUG_RETURN(HA_ERR_WRONG_COMMAND); } -/** @brief + +/** + @brief This create a lock on the table. If you are implementing a storage engine that can handle transacations look at ha_berkely.cc to see how you will want to go about doing this. 
Otherwise you should consider calling flock() @@ -618,7 +693,9 @@ int ha_example::external_lock(THD *thd, int lock_type) DBUG_RETURN(0); } -/** @brief + +/** + @brief The idea with handler::store_lock() is: The statement decides which locks should be needed for the table. For updates/deletes/inserts we get WRITE locks, for SELECT... we get read locks. @@ -646,6 +723,11 @@ int ha_example::external_lock(THD *thd, int lock_type) Called from lock.cc by get_lock_data(). + @note + In this method one should NEVER rely on table->in_use, it may, in fact, + refer to a different thread! (this happens if get_lock_data() is called + from mysql_lock_abort_for_thread() function) + @see get_lock_data() in lock.cc */ @@ -659,7 +741,9 @@ THR_LOCK_DATA **ha_example::store_lock(THD *thd, return to; } -/** @brief + +/** + @brief Used to delete a table. By the time delete_table() has been called all opened references to this table will have been closed (and your globally shared references released). The variable name will just be the name of @@ -684,17 +768,19 @@ int ha_example::delete_table(const char *name) DBUG_RETURN(0); } -/** @brief + +/** + @brief Renames a table from one name to another via an alter table call. - @details + @details If you do not implement this, the default rename_table() is called from handler.cc and it will delete all files with the file extensions returned by bas_ext(). Called from sql_table.cc by mysql_rename_table(). - @see + @see mysql_rename_table() in sql_table.cc */ int ha_example::rename_table(const char * from, const char * to) @@ -703,16 +789,18 @@ int ha_example::rename_table(const char * from, const char * to) DBUG_RETURN(HA_ERR_WRONG_COMMAND); } -/** @brief + +/** + @brief Given a starting key and an ending key, estimate the number of rows that will exist between the two keys. - @details + @details end_key may be empty, in which case determine if start_key matches any rows. Called from opt_range.cc by check_quick_keys(). - @see + @see check_quick_keys() in opt_range.cc */ ha_rows ha_example::records_in_range(uint inx, key_range *min_key, @@ -722,29 +810,38 @@ ha_rows ha_example::records_in_range(uint inx, key_range *min_key, DBUG_RETURN(10); // low number to force index usage } -/** @brief + +/** + @brief create() is called to create a database. The variable name will have the name of the table. - @details - When create() is called you do not need to worry about opening the table. Also, - the .frm file will have already been created so adjusting create_info is not - necessary. You can overwrite the .frm file at this point if you wish to change - the table definition, but there are no methods currently provided for doing so. + @details + When create() is called you do not need to worry about + opening the table. Also, the .frm file will have already been + created so adjusting create_info is not necessary. You can overwrite + the .frm file at this point if you wish to change the table + definition, but there are no methods currently provided for doing + so. Called from handle.cc by ha_create_table(). - @see + @see ha_create_table() in handle.cc */ + int ha_example::create(const char *name, TABLE *table_arg, HA_CREATE_INFO *create_info) { DBUG_ENTER("ha_example::create"); - /* This is not implemented but we want someone to be able to see that it works. */ + /* + This is not implemented but we want someone to be able to see that it + works. 
+ */ DBUG_RETURN(0); } + struct st_mysql_storage_engine example_storage_engine= { MYSQL_HANDLERTON_INTERFACE_VERSION }; @@ -756,11 +853,11 @@ mysql_declare_plugin(example) "Brian Aker, MySQL AB", "Example storage engine", PLUGIN_LICENSE_GPL, - example_init_func, /* Plugin Init */ - example_done_func, /* Plugin Deinit */ + example_init_func, /* Plugin Init */ + example_done_func, /* Plugin Deinit */ 0x0001 /* 0.1 */, - NULL, /* status variables */ - NULL, /* system variables */ - NULL /* config options */ + NULL, /* status variables */ + NULL, /* system variables */ + NULL /* config options */ } mysql_declare_plugin_end; diff --git a/storage/example/ha_example.h b/storage/example/ha_example.h index 9b912514887..ec3987ced5d 100644 --- a/storage/example/ha_example.h +++ b/storage/example/ha_example.h @@ -82,7 +82,12 @@ public: */ ulonglong table_flags() const { - return 0; + /* + We are saying that this engine is just row capable to have an + engine that can only handle row-based logging. This is used in + testing. + */ + return HA_BINLOG_ROW_CAPABLE; } /** @brief @@ -172,50 +177,50 @@ public: We implement this in ha_example.cc. It's not an obligatory method; skip it and and MySQL will treat it as not implemented. */ - int write_row(byte * buf); + int write_row(uchar *buf); /** @brief We implement this in ha_example.cc. It's not an obligatory method; skip it and and MySQL will treat it as not implemented. */ - int update_row(const byte * old_data, byte * new_data); + int update_row(const uchar *old_data, uchar *new_data); /** @brief We implement this in ha_example.cc. It's not an obligatory method; skip it and and MySQL will treat it as not implemented. */ - int delete_row(const byte * buf); + int delete_row(const uchar *buf); /** @brief We implement this in ha_example.cc. It's not an obligatory method; skip it and and MySQL will treat it as not implemented. */ - int index_read(byte * buf, const byte * key, - uint key_len, enum ha_rkey_function find_flag); + int index_read_map(uchar *buf, const uchar *key, + key_part_map keypart_map, enum ha_rkey_function find_flag); /** @brief We implement this in ha_example.cc. It's not an obligatory method; skip it and and MySQL will treat it as not implemented. */ - int index_next(byte * buf); + int index_next(uchar *buf); /** @brief We implement this in ha_example.cc. It's not an obligatory method; skip it and and MySQL will treat it as not implemented. */ - int index_prev(byte * buf); + int index_prev(uchar *buf); /** @brief We implement this in ha_example.cc. It's not an obligatory method; skip it and and MySQL will treat it as not implemented. */ - int index_first(byte * buf); + int index_first(uchar *buf); /** @brief We implement this in ha_example.cc. It's not an obligatory method; skip it and and MySQL will treat it as not implemented. 
*/ - int index_last(byte * buf); + int index_last(uchar *buf); /** @brief Unlike index_init(), rnd_init() can be called two consecutive times @@ -227,9 +232,9 @@ public: */ int rnd_init(bool scan); //required int rnd_end(); - int rnd_next(byte *buf); ///< required - int rnd_pos(byte * buf, byte *pos); ///< required - void position(const byte *record); ///< required + int rnd_next(uchar *buf); ///< required + int rnd_pos(uchar *buf, uchar *pos); ///< required + void position(const uchar *record); ///< required int info(uint); ///< required int extra(enum ha_extra_function operation); int external_lock(THD *thd, int lock_type); ///< required diff --git a/storage/federated/CMakeLists.txt b/storage/federated/CMakeLists.txt index 1f1f4dcd517..62064a633b1 100644 --- a/storage/federated/CMakeLists.txt +++ b/storage/federated/CMakeLists.txt @@ -19,4 +19,9 @@ SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX") INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include ${CMAKE_SOURCE_DIR}/sql ${CMAKE_SOURCE_DIR}/regex ${CMAKE_SOURCE_DIR}/extra/yassl/include) -ADD_LIBRARY(federated ha_federated.cc) + +SET(FEDERATED_SOURCES ha_federated.cc) + +IF(NOT SOURCE_SUBLIBS) + ADD_LIBRARY(federated ${FEDERATED_SOURCES}) +ENDIF(NOT SOURCE_SUBLIBS) diff --git a/storage/federated/ha_federated.cc b/storage/federated/ha_federated.cc index 9290418c7aa..ded0ce88484 100644 --- a/storage/federated/ha_federated.cc +++ b/storage/federated/ha_federated.cc @@ -43,23 +43,55 @@ The create table will simply create the .frm file, and within the "CREATE TABLE" SQL, there SHALL be any of the following : - comment=scheme://username:password@hostname:port/database/tablename - comment=scheme://username@hostname/database/tablename - comment=scheme://username:password@hostname/database/tablename - comment=scheme://username:password@hostname/database/tablename + connection=scheme://username:password@hostname:port/database/tablename + connection=scheme://username@hostname/database/tablename + connection=scheme://username:password@hostname/database/tablename + connection=scheme://username:password@hostname/database/tablename + + - OR - + + As of 5.1 (See worklog #3031), federated now allows you to use a non-url + format, taking advantage of mysql.servers: + + connection="connection_one" + connection="connection_one/table_foo" An example would be: - comment=mysql://username:password@hostname:port/database/tablename + connection=mysql://username:password@hostname:port/database/tablename - ***IMPORTANT*** + or, if we had: + + create server 'server_one' foreign data wrapper 'mysql' options + (HOST '127.0.0.1', + DATABASE 'db1', + USER 'root', + PASSWORD '', + PORT 3306, + SOCKET '', + OWNER 'root'); + + CREATE TABLE federated.t1 ( + `id` int(20) NOT NULL, + `name` varchar(64) NOT NULL default '' + ) + ENGINE="FEDERATED" DEFAULT CHARSET=latin1 + CONNECTION='server_one'; - This is a first release, conceptual release - Only 'mysql://' is supported at this release. + So, this will have been the equivalent of + CONNECTION="mysql://root@127.0.0.1:3306/db1/t1" - This comment connection string is necessary for the handler to be - able to connect to the foreign server. + Then, we can also change the server to point to a new schema: + + ALTER SERVER 'server_one' options(DATABASE 'db2'); + + All subsequent calls will now be against db2.t1! Guess what? You don't + have to perform an alter table! 
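Both connection forms funnel through one parser. A standalone sketch of the dispatch that parse_url() performs further down, where a string containing neither "://" nor '@' is taken as a server name, optionally followed by "/tablename"; the helper name is illustrative only:

    #include <string.h>

    /* Restatement of the test used by parse_url(). */
    static bool is_server_name_form(const char *conn)
    {
      /* "server_one" or "server_one/t1": no scheme, no credentials */
      return !strstr(conn, "://") && !strchr(conn, '@');
    }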
+ + This connecton="connection string" is necessary for the handler to be + able to connect to the foreign server, either by URL, or by server + name. The basic flow is this: @@ -166,7 +198,7 @@ KEY other_key (other)) ENGINE="FEDERATED" DEFAULT CHARSET=latin1 - COMMENT='root@127.0.0.1:9306/federated/test_federated'; + CONNECTION='mysql://root@127.0.0.1:9306/federated/test_federated'; Notice the "COMMENT" and "ENGINE" field? This is where you respectively set the engine type, "FEDERATED" and foreign @@ -263,7 +295,7 @@ To run these tests, go into ./mysql-test (based in the directory you built the server in) - ./mysql-test-run federatedd + ./mysql-test-run federated To run the test, or if you want to run the test and have debug info: @@ -311,7 +343,7 @@ ------------- These were the files that were modified or created for this - Federated handler to work: + Federated handler to work, in 5.0: ./configure.in ./sql/Makefile.am @@ -329,6 +361,13 @@ ./sql/ha_federated.cc ./sql/ha_federated.h + In 5.1 + + my:~/mysql-build/mysql-5.1-bkbits patg$ ls storage/federated/ + CMakeLists.txt Makefile.in ha_federated.h plug.in + Makefile SCCS libfederated.a + Makefile.am ha_federated.cc libfederated_a-ha_federated.o + */ @@ -349,6 +388,11 @@ /* Variables for federated share methods */ static HASH federated_open_tables; // To track open tables pthread_mutex_t federated_mutex; // To init the hash +static char ident_quote_char= '`'; // Character for quoting + // identifiers +static char value_quote_char= '\''; // Character for quoting + // literals +static const int bulk_padding= 64; // bytes "overhead" in packet /* Variables used when chopping off trailing characters */ static const uint sizeof_trailing_comma= sizeof(", ") - 1; @@ -362,8 +406,6 @@ static handler *federated_create_handler(handlerton *hton, MEM_ROOT *mem_root); static int federated_commit(handlerton *hton, THD *thd, bool all); static int federated_rollback(handlerton *hton, THD *thd, bool all); -static int federated_db_init(void); - /* Federated storage engine handlerton */ @@ -377,11 +419,11 @@ static handler *federated_create_handler(handlerton *hton, /* Function we use in the creation of our hash to get key */ -static byte *federated_get_key(FEDERATED_SHARE *share, uint *length, - my_bool not_used __attribute__ ((unused))) +static uchar *federated_get_key(FEDERATED_SHARE *share, size_t *length, + my_bool not_used __attribute__ ((unused))) { *length= share->share_key_length; - return (byte*) share->share_key; + return (uchar*) share->share_key; } /* @@ -389,7 +431,7 @@ static byte *federated_get_key(FEDERATED_SHARE *share, uint *length, SYNOPSIS federated_db_init() - void + p Handlerton RETURN FALSE OK @@ -407,6 +449,13 @@ int federated_db_init(void *p) federated_hton->create= federated_create_handler; federated_hton->flags= HTON_ALTER_NOT_SUPPORTED | HTON_NO_PARTITION; + /* + Support for transactions disabled until WL#2952 fixes it. + We do it like this to avoid "defined but not used" compiler warnings. + */ + federated_hton->commit= 0; + federated_hton->rollback= 0; + if (pthread_mutex_init(&federated_mutex, MY_MUTEX_INIT_FAST)) goto error; if (!hash_init(&federated_open_tables, &my_charset_bin, 32, 0, 0, @@ -440,105 +489,54 @@ int federated_done(void *p) } -/* - Check (in create) whether the tables exists, and that it can be connected to +/** + @brief Append identifiers to the string. 
- SYNOPSIS - check_foreign_data_source() - share pointer to FEDERATED share - table_create_flag tells us that ::create is the caller, - therefore, return CANT_CREATE_FEDERATED_TABLE + @param[in,out] string The target string. + @param[in] name Identifier name + @param[in] length Length of identifier name in bytes + @param[in] quote_char Quote char to use for quoting identifier. - DESCRIPTION - This method first checks that the connection information that parse url - has populated into the share will be sufficient to connect to the foreign - table, and if so, does the foreign table exist. + @return Operation Status + @retval FALSE OK + @retval TRUE There was an error appending to the string. + + @note This function is based upon the append_identifier() function + in sql_show.cc except that quoting always occurs. */ -static int check_foreign_data_source(FEDERATED_SHARE *share, - bool table_create_flag) +static bool append_ident(String *string, const char *name, uint length, + const char quote_char) { - char escaped_table_name[NAME_LEN*2]; - char query_buffer[FEDERATED_QUERY_BUFFER_SIZE]; - char error_buffer[FEDERATED_QUERY_BUFFER_SIZE]; - uint error_code; - String query(query_buffer, sizeof(query_buffer), &my_charset_bin); - MYSQL *mysql; - DBUG_ENTER("ha_federated::check_foreign_data_source"); - - /* Zero the length, otherwise the string will have misc chars */ - query.length(0); + bool result; + uint clen; + const char *name_end; + DBUG_ENTER("append_ident"); - /* error out if we can't alloc memory for mysql_init(NULL) (per Georg) */ - if (!(mysql= mysql_init(NULL))) - DBUG_RETURN(HA_ERR_OUT_OF_MEM); - /* check if we can connect */ - if (!mysql_real_connect(mysql, - share->hostname, - share->username, - share->password, - share->database, - share->port, - share->socket, 0)) + if (quote_char) { - /* - we want the correct error message, but it to return - ER_CANT_CREATE_FEDERATED_TABLE if called by ::create - */ - error_code= (table_create_flag ? - ER_CANT_CREATE_FEDERATED_TABLE : - ER_CONNECT_TO_FOREIGN_DATA_SOURCE); - - my_sprintf(error_buffer, - (error_buffer, - "database: '%s' username: '%s' hostname: '%s'", - share->database, share->username, share->hostname)); + string->reserve(length * 2 + 2); + if ((result= string->append("e_char, 1, system_charset_info))) + goto err; - my_error(ER_CONNECT_TO_FOREIGN_DATA_SOURCE, MYF(0), error_buffer); - goto error; - } - else - { - int escaped_table_name_length= 0; - /* - Since we do not support transactions at this version, we can let the - client API silently reconnect. For future versions, we will need more - logic to deal with transactions - */ - mysql->reconnect= 1; - /* - Note: I am not using INORMATION_SCHEMA because this needs to work with - versions prior to 5.0 - - if we can connect, then make sure the table exists - - the query will be: SELECT * FROM `tablename` WHERE 1=0 - */ - query.append(STRING_WITH_LEN("SELECT * FROM `")); - escaped_table_name_length= - escape_string_for_mysql(&my_charset_bin, (char*)escaped_table_name, - sizeof(escaped_table_name), - share->table_name, - share->table_name_length); - query.append(escaped_table_name, escaped_table_name_length); - query.append(STRING_WITH_LEN("` WHERE 1=0")); - - if (mysql_real_query(mysql, query.ptr(), query.length())) + for (name_end= name+length; name < name_end; name+= clen) { - error_code= table_create_flag ? 
- ER_CANT_CREATE_FEDERATED_TABLE : ER_FOREIGN_DATA_SOURCE_DOESNT_EXIST; - my_sprintf(error_buffer, (error_buffer, "error: %d '%s'", - mysql_errno(mysql), mysql_error(mysql))); - - my_error(error_code, MYF(0), error_buffer); - goto error; + uchar c= *(uchar *) name; + if (!(clen= my_mbcharlen(system_charset_info, c))) + clen= 1; + if (clen == 1 && c == (uchar) quote_char && + (result= string->append("e_char, 1, system_charset_info))) + goto err; + if ((result= string->append(name, clen, string->charset()))) + goto err; } + result= string->append("e_char, 1, system_charset_info); } - error_code=0; + else + result= string->append(name, length, system_charset_info); -error: - mysql_close(mysql); - DBUG_RETURN(error_code); +err: + DBUG_RETURN(result); } @@ -548,45 +546,39 @@ static int parse_url_error(FEDERATED_SHARE *share, TABLE *table, int error_num) int buf_len; DBUG_ENTER("ha_federated parse_url_error"); - if (share->connection_string) - { - DBUG_PRINT("info", - ("error: parse_url. Returning error code %d \ - freeing share->connection_string %lx", - error_num, (long unsigned int) share->connection_string)); - my_free((gptr) share->connection_string, MYF(0)); - share->connection_string= 0; - } buf_len= min(table->s->connect_string.length, FEDERATED_QUERY_BUFFER_SIZE-1); strmake(buf, table->s->connect_string.str, buf_len); my_error(error_num, MYF(0), buf); DBUG_RETURN(error_num); } + /* retrieve server object which contains server meta-data from the system table given a server's name, set share connection parameter members */ -int get_connection(FEDERATED_SHARE *share) +int get_connection(MEM_ROOT *mem_root, FEDERATED_SHARE *share) { int error_num= ER_FOREIGN_SERVER_DOESNT_EXIST; char error_buffer[FEDERATED_QUERY_BUFFER_SIZE]; - FOREIGN_SERVER *server; - MYSQL *mysql_conn= 0; - MYSQL_RES *result= 0; - MYSQL_ROW row= 0; + FOREIGN_SERVER *server, server_buffer; DBUG_ENTER("ha_federated::get_connection"); + /* + get_server_by_name() clones the server if exists and allocates + copies of strings in the supplied mem_root + */ if (!(server= - get_server_by_name(share->connection_string))) + get_server_by_name(mem_root, share->connection_string, &server_buffer))) { DBUG_PRINT("info", ("get_server_by_name returned > 0 error condition!")); /* need to come up with error handling */ error_num=1; goto error; } - DBUG_PRINT("info", ("get_server_by_name returned server at %lx", (long unsigned int) server)); + DBUG_PRINT("info", ("get_server_by_name returned server at %lx", + (long unsigned int) server)); /* Most of these should never be empty strings, error handling will @@ -595,29 +587,22 @@ int get_connection(FEDERATED_SHARE *share) except there are errors in the trace file of the share being overrun at the address of the share. */ - if (server->server_name) - share->server_name= server->server_name; - share->server_name_length= server->server_name_length ? - server->server_name_length : 0; - if (server->username) - share->username= server->username; - if (server->password) - share->password= server->password; - if (server->db) - share->database= server->db; - - share->port= server->port ? 
(ushort) server->port : MYSQL_PORT; - - if (server->host) - share->hostname= server->host; - if (server->socket) - share->socket= server->socket; - else if (strcmp(share->hostname, my_localhost) == 0) - share->socket= my_strdup(MYSQL_UNIX_ADDR, MYF(0)); - if (server->scheme) - share->scheme= server->scheme; - else - share->scheme= NULL; + share->server_name_length= server->server_name_length; + share->server_name= server->server_name; + share->username= server->username; + share->password= server->password; + share->database= server->db; +#ifndef I_AM_PARANOID + share->port= server->port > 0 && server->port < 65536 ? +#else + share->port= server->port > 1023 && server->port < 65536 ? +#endif + (ushort) server->port : MYSQL_PORT; + share->hostname= server->host; + if (!(share->socket= server->socket) && + !strcmp(share->hostname, my_localhost)) + share->socket= (char *) MYSQL_UNIX_ADDR; + share->scheme= server->scheme; DBUG_PRINT("info", ("share->username %s", share->username)); DBUG_PRINT("info", ("share->password %s", share->password)); @@ -640,6 +625,7 @@ error: SYNOPSIS parse_url() + mem_root MEM_ROOT pointer for memory allocation share pointer to FEDERATED share table pointer to current TABLE class table_create_flag determines what error to throw @@ -689,7 +675,7 @@ error: */ -static int parse_url(FEDERATED_SHARE *share, TABLE *table, +static int parse_url(MEM_ROOT *mem_root, FEDERATED_SHARE *share, TABLE *table, uint table_create_flag) { uint error_num= (table_create_flag ? @@ -700,23 +686,22 @@ static int parse_url(FEDERATED_SHARE *share, TABLE *table, share->port= 0; share->socket= 0; DBUG_PRINT("info", ("share at %lx", (long unsigned int) share)); - DBUG_PRINT("info", ("Length: %d", table->s->connect_string.length)); - DBUG_PRINT("info", ("String: '%.*s'", table->s->connect_string.length, + DBUG_PRINT("info", ("Length: %u", (uint) table->s->connect_string.length)); + DBUG_PRINT("info", ("String: '%.*s'", (int) table->s->connect_string.length, table->s->connect_string.str)); - share->connection_string= my_strndup(table->s->connect_string.str, - table->s->connect_string.length, - MYF(0)); + share->connection_string= strmake_root(mem_root, table->s->connect_string.str, + table->s->connect_string.length); - // Add a null for later termination of table name - share->connection_string[table->s->connect_string.length]= 0; DBUG_PRINT("info",("parse_url alloced share->connection_string %lx", (long unsigned int) share->connection_string)); DBUG_PRINT("info",("share->connection_string %s",share->connection_string)); - /* No delimiters, must be a straight connection name */ - if ( (!strchr(share->connection_string, '/')) && - (!strchr(share->connection_string, '@')) && - (!strchr(share->connection_string, ';'))) + /* + No :// or @ in connection string. Must be a straight connection name of + either "servername" or "servername/tablename" + */ + if ( (!strstr(share->connection_string, "://") && + (!strchr(share->connection_string, '@')))) { DBUG_PRINT("info", @@ -725,17 +710,51 @@ static int parse_url(FEDERATED_SHARE *share, TABLE *table, share->connection_string, (long unsigned int) share->connection_string)); + /* ok, so we do a little parsing, but not completely! 
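Once a string is known to be in server-name form, only one more piece of parsing remains, shown in the hunk that follows. A standalone sketch of that split; the helper is hypothetical and mirrors the in-place parsing below:

    #include <string.h>

    /*
      "server_one"    -> server name only; the remote table name
                         defaults to the local table name
      "server_one/t1" -> server name plus remote table name
      A second '/' is rejected as malformed.
    */
    static int split_server_and_table(char *conn, char **table_name)
    {
      char *slash= strchr(conn, '/');
      if (!slash)
      {
        *table_name= NULL;
        return 0;
      }
      *slash= '\0';                 /* terminate the server name */
      *table_name= slash + 1;
      return strchr(*table_name, '/') != NULL;   /* "a/b/c" is an error */
    }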
*/ share->parsed= FALSE; - if ((error_num= get_connection(share))) - goto error; + /* + If there is a single '/' in the connection string, this means the user is + specifying a table name + */ + + if ((share->table_name= strchr(share->connection_string, '/'))) + { + share->connection_string[share->table_name - share->connection_string]= '\0'; + share->table_name++; + share->table_name_length= strlen(share->table_name); + + DBUG_PRINT("info", + ("internal format, parsed table_name share->connection_string \ + %s share->table_name %s", + share->connection_string, share->table_name)); + /* + there better not be any more '/'s ! + */ + if (strchr(share->table_name, '/')) + goto error; + + } /* - connection specifies everything but, resort to - expecting remote and foreign table names to match + otherwise, straight server name, use tablename of federated table + as remote table name */ - share->table_name= table->s->table_name.str; - share->table_name_length= table->s->table_name.length; - share->table_name[share->table_name_length]= '\0'; + else + { + /* + connection specifies everything but, resort to + expecting remote and foreign table names to match + */ + share->table_name= strmake_root(mem_root, table->s->table_name.str, + (share->table_name_length= table->s->table_name.length)); + DBUG_PRINT("info", + ("internal format, default table_name share->connection_string \ + %s share->table_name %s", + share->connection_string, share->table_name)); + } + + if ((error_num= get_connection(mem_root, share))) + goto error; } else { @@ -821,7 +840,7 @@ Then password is a null string, so set to NULL if (!share->port) { if (strcmp(share->hostname, my_localhost) == 0) - share->socket= my_strdup(MYSQL_UNIX_ADDR, MYF(0)); + share->socket= (char *) MYSQL_UNIX_ADDR; else share->port= MYSQL_PORT; } @@ -849,6 +868,7 @@ ha_federated::ha_federated(handlerton *hton, mysql(0), stored_result(0) { trx_next= 0; + bzero(&bulk_insert, sizeof(bulk_insert)); } @@ -872,7 +892,7 @@ ha_federated::ha_federated(handlerton *hton, 0 After fields have had field values stored from record */ -uint ha_federated::convert_row_to_internal_format(byte *record, +uint ha_federated::convert_row_to_internal_format(uchar *record, MYSQL_ROW row, MYSQL_RES *result) { @@ -911,16 +931,15 @@ uint ha_federated::convert_row_to_internal_format(byte *record, static bool emit_key_part_name(String *to, KEY_PART_INFO *part) { DBUG_ENTER("emit_key_part_name"); - if (to->append(STRING_WITH_LEN("`")) || - to->append(part->field->field_name) || - to->append(STRING_WITH_LEN("`"))) + if (append_ident(to, part->field->field_name, + strlen(part->field->field_name), ident_quote_char)) DBUG_RETURN(1); // Out of memory DBUG_RETURN(0); } static bool emit_key_part_element(String *to, KEY_PART_INFO *part, bool needs_quotes, bool is_like, - const byte *ptr, uint len) + const uchar *ptr, uint len) { Field *field= part->field; DBUG_ENTER("emit_key_part_element"); @@ -961,7 +980,7 @@ static bool emit_key_part_element(String *to, KEY_PART_INFO *part, char strbuff[MAX_FIELD_WIDTH]; String str(strbuff, sizeof(strbuff), part->field->charset()), *res; - res= field->val_str(&str, (char *)ptr); + res= field->val_str(&str, ptr); if (field->result_type() == STRING_RESULT) { @@ -1223,12 +1242,12 @@ bool ha_federated::create_where_from_key(String *to, KEY *key_info, const key_range *start_key, const key_range *end_key, - bool records_in_range, + bool from_records_in_range, bool eq_range) { bool both_not_null= (start_key != NULL && end_key != NULL) ? 
TRUE : FALSE; - const byte *ptr; + const uchar *ptr; uint remainder, length; char tmpbuff[FEDERATED_QUERY_BUFFER_SIZE]; String tmp(tmpbuff, sizeof(tmpbuff), system_charset_info); @@ -1267,7 +1286,7 @@ bool ha_federated::create_where_from_key(String *to, uint store_length= key_part->store_length; uint part_length= min(store_length, length); needs_quotes= field->str_needs_quotes(); - DBUG_DUMP("key, start of loop", (char *) ptr, length); + DBUG_DUMP("key, start of loop", ptr, length); if (key_part->null_bit) { @@ -1294,7 +1313,7 @@ bool ha_federated::create_where_from_key(String *to, if (emit_key_part_name(&tmp, key_part)) goto err; - if (records_in_range) + if (from_records_in_range) { if (tmp.append(STRING_WITH_LEN(" >= "))) goto err; @@ -1425,27 +1444,31 @@ err: static FEDERATED_SHARE *get_share(const char *table_name, TABLE *table) { - char *select_query; char query_buffer[FEDERATED_QUERY_BUFFER_SIZE]; Field **field; String query(query_buffer, sizeof(query_buffer), &my_charset_bin); FEDERATED_SHARE *share= NULL, tmp_share; + MEM_ROOT mem_root; + DBUG_ENTER("ha_federated.cc::get_share"); + /* In order to use this string, we must first zero it's length, or it will contain garbage */ query.length(0); + init_alloc_root(&mem_root, 256, 0); + pthread_mutex_lock(&federated_mutex); tmp_share.share_key= table_name; tmp_share.share_key_length= strlen(table_name); - if (parse_url(&tmp_share, table, 0)) + if (parse_url(&mem_root, &tmp_share, table, 0)) goto error; /* TODO: change tmp_share.scheme to LEX_STRING object */ if (!(share= (FEDERATED_SHARE *) hash_search(&federated_open_tables, - (byte*) tmp_share.share_key, + (uchar*) tmp_share.share_key, tmp_share. share_key_length))) { @@ -1453,51 +1476,45 @@ static FEDERATED_SHARE *get_share(const char *table_name, TABLE *table) query.append(STRING_WITH_LEN("SELECT ")); for (field= table->field; *field; field++) { - query.append(STRING_WITH_LEN("`")); - query.append((*field)->field_name); - query.append(STRING_WITH_LEN("`, ")); + append_ident(&query, (*field)->field_name, + strlen((*field)->field_name), ident_quote_char); + query.append(STRING_WITH_LEN(", ")); } /* chops off trailing comma */ query.length(query.length() - sizeof_trailing_comma); - query.append(STRING_WITH_LEN(" FROM `")); + query.append(STRING_WITH_LEN(" FROM ")); - if (!(share= (FEDERATED_SHARE *) - my_multi_malloc(MYF(MY_WME), - &share, sizeof(*share), - &select_query, - query.length()+table->s->connect_string.length+1, - NullS))) - goto error; + append_ident(&query, tmp_share.table_name, + tmp_share.table_name_length, ident_quote_char); - memcpy(share, &tmp_share, sizeof(tmp_share)); + if (!(share= (FEDERATED_SHARE *) memdup_root(&mem_root, (char*)&tmp_share, sizeof(*share))) || + !(share->select_query= (char*) strmake_root(&mem_root, query.ptr(), query.length() + 1))) + goto error; - share->table_name_length= strlen(share->table_name); - /* TODO: share->table_name to LEX_STRING object */ - query.append(share->table_name, share->table_name_length); - query.append(STRING_WITH_LEN("`")); - share->select_query= select_query; - strmov(share->select_query, query.ptr()); share->use_count= 0; + share->mem_root= mem_root; + DBUG_PRINT("info", ("share->select_query %s", share->select_query)); - if (my_hash_insert(&federated_open_tables, (byte*) share)) + if (my_hash_insert(&federated_open_tables, (uchar*) share)) goto error; thr_lock_init(&share->lock); pthread_mutex_init(&share->mutex, MY_MUTEX_INIT_FAST); } + else + free_root(&mem_root, MYF(0)); /* prevents memory leak */ + 
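The share now caches a single SELECT statement built from the column list, with every identifier run through append_ident(). For a table t1 with columns id and name the cached string comes out as below; note that append_ident() doubles an embedded quote character, so even a pathological identifier stays valid (illustrative values):

    /* share->select_query after get_share() for columns (id, name): */
    const char *cached= "SELECT `id`, `name` FROM `t1`";

    /* an identifier containing a backtick, e.g. we`ird, is emitted as: */
    const char *quoted= "`we``ird`";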
share->use_count++; pthread_mutex_unlock(&federated_mutex); - return share; + DBUG_RETURN(share); error: pthread_mutex_unlock(&federated_mutex); - my_free((gptr) tmp_share.connection_string, MYF(MY_ALLOW_ZERO_PTR)); - tmp_share.connection_string= 0; - my_free((gptr) share, MYF(MY_ALLOW_ZERO_PTR)); - return NULL; + free_root(&mem_root, MYF(0)); + DBUG_RETURN(NULL); } @@ -1509,23 +1526,16 @@ error: static int free_share(FEDERATED_SHARE *share) { + MEM_ROOT mem_root= share->mem_root; DBUG_ENTER("free_share"); pthread_mutex_lock(&federated_mutex); if (!--share->use_count) { - hash_delete(&federated_open_tables, (byte*) share); - if (share->parsed) - my_free((gptr) share->socket, MYF(MY_ALLOW_ZERO_PTR)); - /*if (share->connection_string) - { - */ - my_free((gptr) share->connection_string, MYF(MY_ALLOW_ZERO_PTR)); - share->connection_string= 0; - /*}*/ + hash_delete(&federated_open_tables, (uchar*) share); thr_lock_delete(&share->lock); VOID(pthread_mutex_destroy(&share->mutex)); - my_free((gptr) share, MYF(0)); + free_root(&mem_root, MYF(0)); } pthread_mutex_unlock(&federated_mutex); @@ -1534,7 +1544,7 @@ static int free_share(FEDERATED_SHARE *share) ha_rows ha_federated::records_in_range(uint inx, key_range *start_key, - key_range *end_key) + key_range *end_key) { /* @@ -1582,42 +1592,15 @@ int ha_federated::open(const char *name, int mode, uint test_if_locked) DBUG_RETURN(1); thr_lock_data_init(&share->lock, &lock, NULL); - /* Connect to foreign database mysql_real_connect() */ - mysql= mysql_init(0); - - /* - BUG# 17044 Federated Storage Engine is not UTF8 clean - Add set names to whatever charset the table is at open - of table - */ - /* this sets the csname like 'set names utf8' */ - mysql_options(mysql,MYSQL_SET_CHARSET_NAME, - this->table->s->table_charset->csname); - - if (!mysql || !mysql_real_connect(mysql, - share->hostname, - share->username, - share->password, - share->database, - share->port, - share->socket, 0)) - { - free_share(share); - DBUG_RETURN(stash_remote_error()); - } - /* - Since we do not support transactions at this version, we can let the client - API silently reconnect. For future versions, we will need more logic to - deal with transactions - */ - - mysql->reconnect= 1; + DBUG_ASSERT(mysql == NULL); ref_length= (table->s->primary_key != MAX_KEY ? table->key_info[table->s->primary_key].key_length : table->s->reclength); DBUG_PRINT("info", ("ref_length: %u", ref_length)); + reset(); + DBUG_RETURN(0); } @@ -1645,8 +1628,8 @@ int ha_federated::close(void) stored_result= 0; } /* Disconnect from mysql */ - if (mysql) // QQ is this really needed - mysql_close(mysql); + mysql_close(mysql); + mysql= NULL; retval= free_share(share); DBUG_RETURN(retval); @@ -1690,85 +1673,106 @@ static inline uint field_in_record_is_null(TABLE *table, } -/* - write_row() inserts a row. No extra() hint is given currently if a bulk load - is happeneding. buf() is a byte array of data. You can use the field - information to extract the data from the native byte array type. - Example of this would be: - for (Field **field=table->field ; *field ; field++) - { - ... - } - - Called from item_sum.cc, item_sum.cc, sql_acl.cc, sql_insert.cc, - sql_insert.cc, sql_select.cc, sql_table.cc, sql_udf.cc, and sql_update.cc. +/** + @brief Construct the INSERT statement. + + @details This method will construct the INSERT statement and appends it to + the supplied query string buffer. 
+ + @return + @retval FALSE No error + @retval TRUE Failure */ -int ha_federated::write_row(byte *buf) +bool ha_federated::append_stmt_insert(String *query) { - /* - I need a bool again, in 5.0, I used table->s->fields to accomplish this. - This worked as a flag that says there are fields with values or not. - In 5.1, this value doesn't work the same, and I end up with the code - truncating open parenthesis: - - the statement "INSERT INTO t1 VALUES ()" ends up being first built - in two strings - "INSERT INTO t1 (" - and - " VALUES (" - - If there are fields with values, they get appended, with commas, and - the last loop, a trailing comma is there - - "INSERT INTO t1 ( col1, col2, colN, " - - " VALUES ( 'val1', 'val2', 'valN', " - - Then, if there are fields, it should decrement the string by ", " length. - - "INSERT INTO t1 ( col1, col2, colN" - " VALUES ( 'val1', 'val2', 'valN'" + char insert_buffer[FEDERATED_QUERY_BUFFER_SIZE]; + Field **field; + uint tmp_length; + bool added_field= FALSE; - Then it adds a close paren to both - if there are fields + /* The main insert query string */ + String insert_string(insert_buffer, sizeof(insert_buffer), &my_charset_bin); + DBUG_ENTER("ha_federated::append_stmt_insert"); - "INSERT INTO t1 ( col1, col2, colN)" - " VALUES ( 'val1', 'val2', 'valN')" + insert_string.length(0); - Then appends both together - "INSERT INTO t1 ( col1, col2, colN) VALUES ( 'val1', 'val2', 'valN')" + if (replace_duplicates) + insert_string.append(STRING_WITH_LEN("REPLACE INTO ")); + else if (ignore_duplicates && !insert_dup_update) + insert_string.append(STRING_WITH_LEN("INSERT IGNORE INTO ")); + else + insert_string.append(STRING_WITH_LEN("INSERT INTO ")); + append_ident(&insert_string, share->table_name, share->table_name_length, + ident_quote_char); + tmp_length= insert_string.length(); + insert_string.append(STRING_WITH_LEN(" (")); - So... the problem, is if you have the original statement: + /* + loop through the field pointer array, add any fields to both the values + list and the fields list that match the current query id + */ + for (field= table->field; *field; field++) + { + if (bitmap_is_set(table->write_set, (*field)->field_index)) + { + /* append the field name */ + append_ident(&insert_string, (*field)->field_name, + strlen((*field)->field_name), ident_quote_char); - "INSERT INTO t1 VALUES ()" + /* append commas between both fields and fieldnames */ + /* + unfortunately, we can't use the logic if *(fields + 1) to + make the following appends conditional as we don't know if the + next field is in the write set + */ + insert_string.append(STRING_WITH_LEN(", ")); + added_field= TRUE; + } + } - Which is legitimate, but if the code thinks there are fields + if (added_field) + { + /* Remove trailing comma. */ + insert_string.length(insert_string.length() - sizeof_trailing_comma); + insert_string.append(STRING_WITH_LEN(") ")); + } + else + { + /* If there were no fields, we don't want to add a closing paren. */ + insert_string.length(tmp_length); + } - "INSERT INTO t1 (" - " VALUES ( " + insert_string.append(STRING_WITH_LEN(" VALUES ")); - If the field flag is set, but there are no commas, reduces the - string by strlen(", ") + DBUG_RETURN(query->append(insert_string)); +} - "INSERT INTO t1 " - " VALUES " - Then adds the close parenthesis +/* + write_row() inserts a row. No extra() hint is given currently if a bulk load + is happeneding. buf() is a byte array of data. You can use the field + information to extract the data from the native byte array type. 
+ Example of this would be: + for (Field **field=table->field ; *field ; field++) + { + ... + } - "INSERT INTO t1 )" - " VALUES )" + Called from item_sum.cc, item_sum.cc, sql_acl.cc, sql_insert.cc, + sql_insert.cc, sql_select.cc, sql_table.cc, sql_udf.cc, and sql_update.cc. +*/ - So, I have to use a bool as before, set in the loop where fields and commas - are appended to the string - */ - my_bool commas_added= FALSE; - char insert_buffer[FEDERATED_QUERY_BUFFER_SIZE]; +int ha_federated::write_row(uchar *buf) +{ char values_buffer[FEDERATED_QUERY_BUFFER_SIZE]; char insert_field_value_buffer[STRING_BUFFER_USUAL_SIZE]; Field **field; + uint tmp_length; + int error= 0; + bool use_bulk_insert; + bool auto_increment_update_required= (table->next_number_field != NULL); - /* The main insert query string */ - String insert_string(insert_buffer, sizeof(insert_buffer), &my_charset_bin); /* The string containing the values to be added to the insert */ String values_string(values_buffer, sizeof(values_buffer), &my_charset_bin); /* The actual value of the field, to be added to the values_string */ @@ -1779,22 +1783,26 @@ int ha_federated::write_row(byte *buf) DBUG_ENTER("ha_federated::write_row"); values_string.length(0); - insert_string.length(0); insert_field_value_string.length(0); - statistic_increment(table->in_use->status_var.ha_write_count, &LOCK_status); + ha_statistic_increment(&SSV::ha_write_count); if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT) table->timestamp_field->set_time(); /* start both our field and field values strings + We must disable multi-row insert for "INSERT...ON DUPLICATE KEY UPDATE" + Ignore duplicates is always true when insert_dup_update is true. + When replace_duplicates == TRUE, we can safely enable multi-row insert. + When performing multi-row insert, we only collect the columns values for + the row. The start of the statement is only created when the first + row is copied in to the bulk_insert string. 
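A sketch of that accumulation rule, using std::string in place of the DYNAMIC_STRING the patch uses, to make the two append paths explicit:

    #include <string>

    /* Hypothetical stand-in for the bulk_insert buffer. */
    void append_row(std::string &bulk, const std::string &stmt_head,
                    const std::string &row_values)
    {
      if (bulk.empty())
        bulk+= stmt_head;   /* "INSERT INTO `t1` (`a`, `b`) VALUES " */
      else
        bulk+= ",";         /* every later row contributes ",(...)" */
      bulk+= row_values;    /* "(1, 'x')" */
    }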
*/ - insert_string.append(STRING_WITH_LEN("INSERT INTO `")); - insert_string.append(share->table_name, share->table_name_length); - insert_string.append('`'); - insert_string.append(STRING_WITH_LEN(" (")); + if (!(use_bulk_insert= bulk_insert.str && + (!insert_dup_update || replace_duplicates))) + append_stmt_insert(&values_string); - values_string.append(STRING_WITH_LEN(" VALUES ")); values_string.append(STRING_WITH_LEN(" (")); + tmp_length= values_string.length(); /* loop through the field pointer array, add any fields to both the values @@ -1804,7 +1812,6 @@ int ha_federated::write_row(byte *buf) { if (bitmap_is_set(table->write_set, (*field)->field_index)) { - commas_added= TRUE; if ((*field)->is_null()) values_string.append(STRING_WITH_LEN(" NULL ")); else @@ -1812,15 +1819,13 @@ int ha_federated::write_row(byte *buf) bool needs_quote= (*field)->str_needs_quotes(); (*field)->val_str(&insert_field_value_string); if (needs_quote) - values_string.append('\''); + values_string.append(value_quote_char); insert_field_value_string.print(&values_string); if (needs_quote) - values_string.append('\''); + values_string.append(value_quote_char); insert_field_value_string.length(0); } - /* append the field name */ - insert_string.append((*field)->field_name); /* append commas between both fields and fieldnames */ /* @@ -1828,7 +1833,6 @@ int ha_federated::write_row(byte *buf) make the following appends conditional as we don't know if the next field is in the write set */ - insert_string.append(STRING_WITH_LEN(", ")); values_string.append(STRING_WITH_LEN(", ")); } } @@ -1839,26 +1843,52 @@ int ha_federated::write_row(byte *buf) AND, we don't want to chop off the last char '(' insert will be "INSERT INTO t1 VALUES ();" */ - if (commas_added) + if (values_string.length() > tmp_length) { - insert_string.length(insert_string.length() - sizeof_trailing_comma); - /* chops off leading commas */ + /* chops off trailing comma */ values_string.length(values_string.length() - sizeof_trailing_comma); - insert_string.append(STRING_WITH_LEN(") ")); } - else - { - /* chops off trailing ) */ - insert_string.length(insert_string.length() - sizeof_trailing_closeparen); - } - /* we always want to append this, even if there aren't any fields */ values_string.append(STRING_WITH_LEN(") ")); - /* add the values */ - insert_string.append(values_string); + if (use_bulk_insert) + { + /* + Send the current bulk insert out if appending the current row would + cause the statement to overflow the packet size, otherwise set + auto_increment_update_required to FALSE as no query was executed. 
+ */ + if (bulk_insert.length + values_string.length() + bulk_padding > + mysql->net.max_packet_size && bulk_insert.length) + { + error= real_query(bulk_insert.str, bulk_insert.length); + bulk_insert.length= 0; + } + else + auto_increment_update_required= FALSE; + + if (bulk_insert.length == 0) + { + char insert_buffer[FEDERATED_QUERY_BUFFER_SIZE]; + String insert_string(insert_buffer, sizeof(insert_buffer), + &my_charset_bin); + insert_string.length(0); + append_stmt_insert(&insert_string); + dynstr_append_mem(&bulk_insert, insert_string.ptr(), + insert_string.length()); + } + else + dynstr_append_mem(&bulk_insert, ",", 1); - if (mysql_real_query(mysql, insert_string.ptr(), insert_string.length())) + dynstr_append_mem(&bulk_insert, values_string.ptr(), + values_string.length()); + } + else + { + error= real_query(values_string.ptr(), values_string.length()); + } + + if (error) { DBUG_RETURN(stash_remote_error()); } @@ -1866,12 +1896,91 @@ int ha_federated::write_row(byte *buf) If the table we've just written a record to contains an auto_increment field, then store the last_insert_id() value from the foreign server */ - if (table->next_number_field) + if (auto_increment_update_required) + { update_auto_increment(); + /* mysql_insert() uses this for protocol return value */ + table->next_number_field->store(stats.auto_increment_value, 1); + } + DBUG_RETURN(0); } + +/** + @brief Prepares the storage engine for bulk inserts. + + @param[in] rows estimated number of rows in bulk insert + or 0 if unknown. + + @details Initializes memory structures required for bulk insert. +*/ + +void ha_federated::start_bulk_insert(ha_rows rows) +{ + uint page_size; + DBUG_ENTER("ha_federated::start_bulk_insert"); + + dynstr_free(&bulk_insert); + + /** + We don't bother with bulk-insert semantics when the estimated rows == 1 + The rows value will be 0 if the server does not know how many rows + would be inserted. This can occur when performing INSERT...SELECT + */ + + if (rows == 1) + DBUG_VOID_RETURN; + + /* + Make sure we have an open connection so that we know the + maximum packet size. + */ + if (!mysql && real_connect()) + DBUG_VOID_RETURN; + + page_size= (uint) my_getpagesize(); + + if (init_dynamic_string(&bulk_insert, NULL, page_size, page_size)) + DBUG_VOID_RETURN; + + bulk_insert.length= 0; + DBUG_VOID_RETURN; +} + + +/** + @brief End bulk insert. + + @details This method will send any remaining rows to the remote server. + Finally, it will deinitialize the bulk insert data structure. + + @return Operation status + @retval 0 No error + @retval != 0 Error occured at remote server. Also sets my_errno. 
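The flush decision above is worth isolating: a row is never allowed to push the accumulated statement past the server's packet limit, with bulk_padding (64 bytes) reserved for protocol overhead. A standalone restatement of the test:

    /* Sketch of the overflow check used in write_row(). */
    static bool must_flush(size_t accumulated, size_t row_len,
                           size_t max_packet, size_t padding)
    {
      /* Nothing to flush if no rows have been collected yet. */
      return accumulated != 0 &&
             accumulated + row_len + padding > max_packet;
    }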
+*/ + +int ha_federated::end_bulk_insert() +{ + int error= 0; + DBUG_ENTER("ha_federated::end_bulk_insert"); + + if (bulk_insert.str && bulk_insert.length) + { + if (real_query(bulk_insert.str, bulk_insert.length)) + error= stash_remote_error(); + else + if (table->next_number_field) + update_auto_increment(); + } + + dynstr_free(&bulk_insert); + + DBUG_RETURN(my_errno= error); +} + + /* ha_federated::update_auto_increment @@ -1885,8 +1994,9 @@ void ha_federated::update_auto_increment(void) THD *thd= current_thd; DBUG_ENTER("ha_federated::update_auto_increment"); + ha_federated::info(HA_STATUS_AUTO); thd->first_successful_insert_id_in_cur_stmt= - mysql->last_used_con->insert_id; + stats.auto_increment_value; DBUG_PRINT("info",("last_insert_id: %ld", (long) stats.auto_increment_value)); DBUG_VOID_RETURN; @@ -1901,11 +2011,11 @@ int ha_federated::optimize(THD* thd, HA_CHECK_OPT* check_opt) query.length(0); query.set_charset(system_charset_info); - query.append(STRING_WITH_LEN("OPTIMIZE TABLE `")); - query.append(share->table_name, share->table_name_length); - query.append(STRING_WITH_LEN("`")); + query.append(STRING_WITH_LEN("OPTIMIZE TABLE ")); + append_ident(&query, share->table_name, share->table_name_length, + ident_quote_char); - if (mysql_real_query(mysql, query.ptr(), query.length())) + if (real_query(query.ptr(), query.length())) { DBUG_RETURN(stash_remote_error()); } @@ -1923,9 +2033,9 @@ int ha_federated::repair(THD* thd, HA_CHECK_OPT* check_opt) query.length(0); query.set_charset(system_charset_info); - query.append(STRING_WITH_LEN("REPAIR TABLE `")); - query.append(share->table_name, share->table_name_length); - query.append(STRING_WITH_LEN("`")); + query.append(STRING_WITH_LEN("REPAIR TABLE ")); + append_ident(&query, share->table_name, share->table_name_length, + ident_quote_char); if (check_opt->flags & T_QUICK) query.append(STRING_WITH_LEN(" QUICK")); if (check_opt->flags & T_EXTEND) @@ -1933,7 +2043,7 @@ int ha_federated::repair(THD* thd, HA_CHECK_OPT* check_opt) if (check_opt->sql_flags & TT_USEFRM) query.append(STRING_WITH_LEN(" USE_FRM")); - if (mysql_real_query(mysql, query.ptr(), query.length())) + if (real_query(query.ptr(), query.length())) { DBUG_RETURN(stash_remote_error()); } @@ -1959,7 +2069,7 @@ int ha_federated::repair(THD* thd, HA_CHECK_OPT* check_opt) Called from sql_select.cc, sql_acl.cc, sql_update.cc, and sql_insert.cc. */ -int ha_federated::update_row(const byte *old_data, byte *new_data) +int ha_federated::update_row(const uchar *old_data, uchar *new_data) { /* This used to control how the query was built. 
If there was a @@ -1993,7 +2103,7 @@ int ha_federated::update_row(const byte *old_data, byte *new_data) String where_string(where_buffer, sizeof(where_buffer), &my_charset_bin); - byte *record= table->record[0]; + uchar *record= table->record[0]; DBUG_ENTER("ha_federated::update_row"); /* set string lengths to 0 to avoid misc chars in string @@ -2002,9 +2112,13 @@ int ha_federated::update_row(const byte *old_data, byte *new_data) update_string.length(0); where_string.length(0); - update_string.append(STRING_WITH_LEN("UPDATE `")); - update_string.append(share->table_name); - update_string.append(STRING_WITH_LEN("` SET ")); + if (ignore_duplicates) + update_string.append(STRING_WITH_LEN("UPDATE IGNORE ")); + else + update_string.append(STRING_WITH_LEN("UPDATE ")); + append_ident(&update_string, share->table_name, + share->table_name_length, ident_quote_char); + update_string.append(STRING_WITH_LEN(" SET ")); /* In this loop, we want to match column names to values being inserted @@ -2020,7 +2134,9 @@ int ha_federated::update_row(const byte *old_data, byte *new_data) { if (bitmap_is_set(table->write_set, (*field)->field_index)) { - update_string.append((*field)->field_name); + uint field_name_length= strlen((*field)->field_name); + append_ident(&update_string, (*field)->field_name, field_name_length, + ident_quote_char); update_string.append(STRING_WITH_LEN(" = ")); if ((*field)->is_null()) @@ -2032,10 +2148,10 @@ int ha_federated::update_row(const byte *old_data, byte *new_data) bool needs_quote= (*field)->str_needs_quotes(); (*field)->val_str(&field_value); if (needs_quote) - update_string.append('\''); + update_string.append(value_quote_char); field_value.print(&update_string); if (needs_quote) - update_string.append('\''); + update_string.append(value_quote_char); field_value.length(0); tmp_restore_column_map(table->read_set, old_map); } @@ -2044,7 +2160,9 @@ int ha_federated::update_row(const byte *old_data, byte *new_data) if (bitmap_is_set(table->read_set, (*field)->field_index)) { - where_string.append((*field)->field_name); + uint field_name_length= strlen((*field)->field_name); + append_ident(&where_string, (*field)->field_name, field_name_length, + ident_quote_char); if (field_in_record_is_null(table, *field, (char*) old_data)) where_string.append(STRING_WITH_LEN(" IS NULL ")); else @@ -2052,12 +2170,12 @@ int ha_federated::update_row(const byte *old_data, byte *new_data) bool needs_quote= (*field)->str_needs_quotes(); where_string.append(STRING_WITH_LEN(" = ")); (*field)->val_str(&field_value, - (char*) (old_data + (*field)->offset(record))); + (old_data + (*field)->offset(record))); if (needs_quote) - where_string.append('\''); + where_string.append(value_quote_char); field_value.print(&where_string); if (needs_quote) - where_string.append('\''); + where_string.append(value_quote_char); field_value.length(0); } where_string.append(STRING_WITH_LEN(" AND ")); @@ -2082,7 +2200,7 @@ int ha_federated::update_row(const byte *old_data, byte *new_data) if (!has_a_primary_key) update_string.append(STRING_WITH_LEN(" LIMIT 1")); - if (mysql_real_query(mysql, update_string.ptr(), update_string.length())) + if (real_query(update_string.ptr(), update_string.length())) { DBUG_RETURN(stash_remote_error()); } @@ -2104,7 +2222,7 @@ int ha_federated::update_row(const byte *old_data, byte *new_data) calls. 
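Concretely, for a table without a primary key the statement update_row() assembles pins the old row with every read_set column and caps the damage with LIMIT 1, since two identical rows cannot be told apart on the remote side. An illustrative result (values hypothetical):

    const char *sample_update=
      "UPDATE `t1` SET `name` = 'new' "
      "WHERE `id` = 1 AND `name` = 'old' LIMIT 1";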
*/ -int ha_federated::delete_row(const byte *buf) +int ha_federated::delete_row(const uchar *buf) { char delete_buffer[FEDERATED_QUERY_BUFFER_SIZE]; char data_buffer[FEDERATED_QUERY_BUFFER_SIZE]; @@ -2114,9 +2232,10 @@ int ha_federated::delete_row(const byte *buf) DBUG_ENTER("ha_federated::delete_row"); delete_string.length(0); - delete_string.append(STRING_WITH_LEN("DELETE FROM `")); - delete_string.append(share->table_name); - delete_string.append(STRING_WITH_LEN("` WHERE ")); + delete_string.append(STRING_WITH_LEN("DELETE FROM ")); + append_ident(&delete_string, share->table_name, + share->table_name_length, ident_quote_char); + delete_string.append(STRING_WITH_LEN(" WHERE ")); for (Field **field= table->field; *field; field++) { @@ -2124,8 +2243,9 @@ int ha_federated::delete_row(const byte *buf) found++; if (bitmap_is_set(table->read_set, cur_field->field_index)) { + append_ident(&delete_string, (*field)->field_name, + strlen((*field)->field_name), ident_quote_char); data_string.length(0); - delete_string.append(cur_field->field_name); if (cur_field->is_null()) { delete_string.append(STRING_WITH_LEN(" IS NULL ")); @@ -2136,10 +2256,10 @@ int ha_federated::delete_row(const byte *buf) delete_string.append(STRING_WITH_LEN(" = ")); cur_field->val_str(&data_string); if (needs_quote) - delete_string.append('\''); + delete_string.append(value_quote_char); data_string.print(&delete_string); if (needs_quote) - delete_string.append('\''); + delete_string.append(value_quote_char); } delete_string.append(STRING_WITH_LEN(" AND ")); } @@ -2153,7 +2273,7 @@ int ha_federated::delete_row(const byte *buf) delete_string.append(STRING_WITH_LEN(" LIMIT 1")); DBUG_PRINT("info", ("Delete sql: %s", delete_string.c_ptr_quick())); - if (mysql_real_query(mysql, delete_string.ptr(), delete_string.length())) + if (real_query(delete_string.ptr(), delete_string.length())) { DBUG_RETURN(stash_remote_error()); } @@ -2174,7 +2294,7 @@ int ha_federated::delete_row(const byte *buf) a WHERE clause on a non-primary key index, simply calls index_read_idx. */ -int ha_federated::index_read(byte *buf, const byte *key, +int ha_federated::index_read(uchar *buf, const uchar *key, uint key_len, ha_rkey_function find_flag) { DBUG_ENTER("ha_federated::index_read"); @@ -2200,7 +2320,7 @@ int ha_federated::index_read(byte *buf, const byte *key, returns. 
We need to be able to be calable from ha_rnd_pos() */ -int ha_federated::index_read_idx(byte *buf, uint index, const byte *key, +int ha_federated::index_read_idx(uchar *buf, uint index, const uchar *key, uint key_len, enum ha_rkey_function find_flag) { int retval; @@ -2226,8 +2346,8 @@ int ha_federated::index_read_idx(byte *buf, uint index, const byte *key, table->status == STATUS_NOT_FOUND */ -int ha_federated::index_read_idx_with_result_set(byte *buf, uint index, - const byte *key, +int ha_federated::index_read_idx_with_result_set(uchar *buf, uint index, + const uchar *key, uint key_len, ha_rkey_function find_flag, MYSQL_RES **result) @@ -2248,8 +2368,7 @@ int ha_federated::index_read_idx_with_result_set(byte *buf, uint index, *result= 0; // In case of errors index_string.length(0); sql_query.length(0); - statistic_increment(table->in_use->status_var.ha_read_key_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_key_count); sql_query.append(share->select_query); @@ -2262,7 +2381,7 @@ int ha_federated::index_read_idx_with_result_set(byte *buf, uint index, NULL, 0, 0); sql_query.append(index_string); - if (mysql_real_query(mysql, sql_query.ptr(), sql_query.length())) + if (real_query(sql_query.ptr(), sql_query.length())) { my_sprintf(error_buffer, (error_buffer, "error: %d '%s'", mysql_errno(mysql), mysql_error(mysql))); @@ -2306,7 +2425,7 @@ int ha_federated::index_init(uint keynr, bool sorted) int ha_federated::read_range_first(const key_range *start_key, const key_range *end_key, - bool eq_range, bool sorted) + bool eq_range_arg, bool sorted) { char sql_query_buffer[FEDERATED_QUERY_BUFFER_SIZE]; int retval; @@ -2321,14 +2440,14 @@ int ha_federated::read_range_first(const key_range *start_key, sql_query.append(share->select_query); create_where_from_key(&sql_query, &table->key_info[active_index], - start_key, end_key, 0, eq_range); + start_key, end_key, 0, eq_range_arg); if (stored_result) { mysql_free_result(stored_result); stored_result= 0; } - if (mysql_real_query(mysql, sql_query.ptr(), sql_query.length())) + if (real_query(sql_query.ptr(), sql_query.length())) { retval= ER_QUERY_ON_FOREIGN_DATA_SOURCE; goto error; @@ -2360,11 +2479,10 @@ int ha_federated::read_range_next() /* Used to read forward through the index. */ -int ha_federated::index_next(byte *buf) +int ha_federated::index_next(uchar *buf) { DBUG_ENTER("ha_federated::index_next"); - statistic_increment(table->in_use->status_var.ha_read_next_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_next_count); DBUG_RETURN(read_next(buf, stored_result)); } @@ -2428,9 +2546,7 @@ int ha_federated::rnd_init(bool scan) stored_result= 0; } - if (mysql_real_query(mysql, - share->select_query, - strlen(share->select_query))) + if (real_query(share->select_query, strlen(share->select_query))) goto error; stored_result= mysql_store_result(mysql); @@ -2474,7 +2590,7 @@ int ha_federated::index_end(void) sql_table.cc, and sql_update.cc. */ -int ha_federated::rnd_next(byte *buf) +int ha_federated::rnd_next(uchar *buf) { DBUG_ENTER("ha_federated::rnd_next"); @@ -2511,7 +2627,7 @@ int ha_federated::rnd_next(byte *buf) 0 no error */ -int ha_federated::read_next(byte *buf, MYSQL_RES *result) +int ha_federated::read_next(uchar *buf, MYSQL_RES *result) { int retval; MYSQL_ROW row; @@ -2540,11 +2656,11 @@ int ha_federated::read_next(byte *buf, MYSQL_RES *result) Called from filesort.cc, sql_select.cc, sql_delete.cc and sql_update.cc. 
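Taken together, the scan path above reduces to a simple remote cursor. A condensed driver showing only the order of calls; the buffer size is hypothetical and error handling is omitted:

    /* Sketch: full-table scan through the federated handler. */
    int scan_all(ha_federated *h)
    {
      uchar buf[1024];          /* hypothetical row buffer size */
      h->rnd_init(true);        /* sends share->select_query, stores result */
      while (h->rnd_next(buf) == 0)
        ;                       /* each call fetches and converts one row */
      return h->rnd_end();
    }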
*/ -void ha_federated::position(const byte *record) +void ha_federated::position(const uchar *record) { DBUG_ENTER("ha_federated::position"); if (table->s->primary_key != MAX_KEY) - key_copy(ref, (byte *)record, table->key_info + table->s->primary_key, + key_copy(ref, (uchar *)record, table->key_info + table->s->primary_key, ref_length); else memcpy(ref, record, ref_length); @@ -2561,12 +2677,11 @@ void ha_federated::position(const byte *record) Called from filesort.cc records.cc sql_insert.cc sql_select.cc sql_update.cc. */ -int ha_federated::rnd_pos(byte *buf, byte *pos) +int ha_federated::rnd_pos(uchar *buf, uchar *pos) { int result; DBUG_ENTER("ha_federated::rnd_pos"); - statistic_increment(table->in_use->status_var.ha_read_rnd_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_rnd_count); if (table->s->primary_key != MAX_KEY) { /* We have a primary key, so use index_read_idx to find row */ @@ -2632,7 +2747,6 @@ int ha_federated::info(uint flag) { char error_buffer[FEDERATED_QUERY_BUFFER_SIZE]; char status_buf[FEDERATED_QUERY_BUFFER_SIZE]; - char escaped_table_name[FEDERATED_QUERY_BUFFER_SIZE]; int error; uint error_code; MYSQL_RES *result= 0; @@ -2645,16 +2759,11 @@ int ha_federated::info(uint flag) if (flag & (HA_STATUS_VARIABLE | HA_STATUS_CONST)) { status_query_string.length(0); - status_query_string.append(STRING_WITH_LEN("SHOW TABLE STATUS LIKE '")); - escape_string_for_mysql(&my_charset_bin, (char *)escaped_table_name, - sizeof(escaped_table_name), - share->table_name, - share->table_name_length); - status_query_string.append(escaped_table_name); - status_query_string.append(STRING_WITH_LEN("'")); - - if (mysql_real_query(mysql, status_query_string.ptr(), - status_query_string.length())) + status_query_string.append(STRING_WITH_LEN("SHOW TABLE STATUS LIKE ")); + append_ident(&status_query_string, share->table_name, + share->table_name_length, value_quote_char); + + if (real_query(status_query_string.ptr(), status_query_string.length())) goto error; status_query_string.length(0); @@ -2706,22 +2815,90 @@ int ha_federated::info(uint flag) } - if (result) - mysql_free_result(result); + if (flag & HA_STATUS_AUTO) + stats.auto_increment_value= mysql->last_used_con->insert_id; + + mysql_free_result(result); DBUG_RETURN(0); error: - if (result) - mysql_free_result(result); - - my_sprintf(error_buffer, (error_buffer, ": %d : %s", - mysql_errno(mysql), mysql_error(mysql))); - my_error(error_code, MYF(0), error_buffer); + mysql_free_result(result); + if (mysql) + { + my_sprintf(error_buffer, (error_buffer, ": %d : %s", + mysql_errno(mysql), mysql_error(mysql))); + my_error(error_code, MYF(0), error_buffer); + } + else + if (remote_error_number != -1 /* error already reported */) + { + error_code= remote_error_number; + my_error(error_code, MYF(0), ER(error_code)); + } DBUG_RETURN(error_code); } +/** + @brief Handles extra signals from MySQL server + + @param[in] operation Hint for storage engine + + @return Operation Status + @retval 0 OK + */ +int ha_federated::extra(ha_extra_function operation) +{ + DBUG_ENTER("ha_federated::extra"); + switch (operation) { + case HA_EXTRA_IGNORE_DUP_KEY: + ignore_duplicates= TRUE; + break; + case HA_EXTRA_NO_IGNORE_DUP_KEY: + insert_dup_update= FALSE; + ignore_duplicates= FALSE; + break; + case HA_EXTRA_WRITE_CAN_REPLACE: + replace_duplicates= TRUE; + break; + case HA_EXTRA_WRITE_CANNOT_REPLACE: + /* + We use this flag to ensure that we do not create an "INSERT IGNORE" + statement when inserting new rows into the remote table. 
+ */ + replace_duplicates= FALSE; + break; + case HA_EXTRA_INSERT_WITH_UPDATE: + insert_dup_update= TRUE; + break; + default: + /* do nothing */ + DBUG_PRINT("info",("unhandled operation: %d", (uint) operation)); + } + DBUG_RETURN(0); +} + + +/** + @brief Reset state of file to after 'open'. + + @detail This function is called after every statement for all tables + used by that statement. + + @return Operation status + @retval 0 OK +*/ + +int ha_federated::reset(void) +{ + insert_dup_update= FALSE; + ignore_duplicates= FALSE; + replace_duplicates= FALSE; + return 0; +} + + /* Used to delete all rows in a table. Both for cases of truncate and for cases where the optimizer realizes that all rows will be @@ -2743,14 +2920,14 @@ int ha_federated::delete_all_rows() query.length(0); query.set_charset(system_charset_info); - query.append(STRING_WITH_LEN("TRUNCATE `")); - query.append(share->table_name); - query.append(STRING_WITH_LEN("`")); + query.append(STRING_WITH_LEN("TRUNCATE ")); + append_ident(&query, share->table_name, share->table_name_length, + ident_quote_char); /* TRUNCATE won't return anything in mysql_affected_rows */ - if (mysql_real_query(mysql, query.ptr(), query.length())) + if (real_query(query.ptr(), query.length())) { DBUG_RETURN(stash_remote_error()); } @@ -2836,25 +3013,129 @@ int ha_federated::create(const char *name, TABLE *table_arg, HA_CREATE_INFO *create_info) { int retval; + THD *thd= current_thd; FEDERATED_SHARE tmp_share; // Only a temporary share, to test the url DBUG_ENTER("ha_federated::create"); - if (!(retval= parse_url(&tmp_share, table_arg, 1))) - retval= check_foreign_data_source(&tmp_share, 1); + retval= parse_url(thd->mem_root, &tmp_share, table_arg, 1); - /* free this because strdup created it in parse_url */ - my_free((gptr) tmp_share.connection_string, MYF(MY_ALLOW_ZERO_PTR)); - tmp_share.connection_string= 0; DBUG_RETURN(retval); } +int ha_federated::real_connect() +{ + char buffer[FEDERATED_QUERY_BUFFER_SIZE]; + String sql_query(buffer, sizeof(buffer), &my_charset_bin); + DBUG_ENTER("ha_federated::real_connect"); + + /* + Bug#25679 + Ensure that we do not hold the LOCK_open mutex while attempting + to establish Federated connection to guard against a trivial + Denial of Service scenerio. + */ + safe_mutex_assert_not_owner(&LOCK_open); + + DBUG_ASSERT(mysql == NULL); + + if (!(mysql= mysql_init(NULL))) + { + remote_error_number= HA_ERR_OUT_OF_MEM; + DBUG_RETURN(-1); + } + + /* + BUG# 17044 Federated Storage Engine is not UTF8 clean + Add set names to whatever charset the table is at open + of table + */ + /* this sets the csname like 'set names utf8' */ + mysql_options(mysql,MYSQL_SET_CHARSET_NAME, + this->table->s->table_charset->csname); + + sql_query.length(0); + + if (!mysql_real_connect(mysql, + share->hostname, + share->username, + share->password, + share->database, + share->port, + share->socket, 0)) + { + stash_remote_error(); + mysql_close(mysql); + mysql= NULL; + my_error(ER_CONNECT_TO_FOREIGN_DATA_SOURCE, MYF(0), remote_error_buf); + remote_error_number= -1; + DBUG_RETURN(-1); + } + + /* + We have established a connection, lets try a simple dummy query just + to check that the table and expected columns are present. 
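With the share's cached SELECT list this probe costs almost nothing on the wire: appending WHERE 1=0 verifies that the remote table exists and exposes every expected column while returning an empty result set. For the t1 example above the probe would be (illustrative):

    /* Probe sent by real_connect(): */
    const char *probe= "SELECT `id`, `name` FROM `t1` WHERE 1=0";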
+ */ + sql_query.append(share->select_query); + sql_query.append(STRING_WITH_LEN(" WHERE 1=0")); + if (mysql_real_query(mysql, sql_query.ptr(), sql_query.length())) + { + sql_query.length(0); + sql_query.append("error: "); + sql_query.qs_append(mysql_errno(mysql)); + sql_query.append(" '"); + sql_query.append(mysql_error(mysql)); + sql_query.append("'"); + mysql_close(mysql); + mysql= NULL; + my_error(ER_FOREIGN_DATA_SOURCE_DOESNT_EXIST, MYF(0), sql_query.ptr()); + remote_error_number= -1; + DBUG_RETURN(-1); + } + + /* Just throw away the result, no rows anyways but need to keep in sync */ + mysql_free_result(mysql_store_result(mysql)); + + /* + Since we do not support transactions at this version, we can let the client + API silently reconnect. For future versions, we will need more logic to + deal with transactions + */ + + mysql->reconnect= 1; + DBUG_RETURN(0); +} + + +int ha_federated::real_query(const char *query, uint length) +{ + int rc= 0; + DBUG_ENTER("ha_federated::real_query"); + + if (!mysql && (rc= real_connect())) + goto end; + + if (!query || !length) + goto end; + + rc= mysql_real_query(mysql, query, length); + +end: + DBUG_RETURN(rc); +} + + int ha_federated::stash_remote_error() { DBUG_ENTER("ha_federated::stash_remote_error()"); + if (!mysql) + DBUG_RETURN(remote_error_number); remote_error_number= mysql_errno(mysql); strmake(remote_error_buf, mysql_error(mysql), sizeof(remote_error_buf)-1); + if (remote_error_number == ER_DUP_ENTRY || + remote_error_number == ER_DUP_KEY) + DBUG_RETURN(HA_ERR_FOUND_DUPP_KEY); DBUG_RETURN(HA_FEDERATED_ERROR_WITH_REMOTE_SYSTEM); } @@ -2880,11 +3161,16 @@ bool ha_federated::get_error_message(int error, String* buf) int ha_federated::external_lock(THD *thd, int lock_type) { int error= 0; - ha_federated *trx= (ha_federated *)thd->ha_data[ht->slot]; DBUG_ENTER("ha_federated::external_lock"); + /* + Support for transactions disabled until WL#2952 fixes it. + */ +#ifdef XXX_SUPERCEDED_BY_WL2952 if (lock_type != F_UNLCK) { + ha_federated *trx= (ha_federated *)thd->ha_data[ht->slot]; + DBUG_PRINT("info",("federated not lock F_UNLCK")); if (!(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))) { @@ -2936,7 +3222,8 @@ int ha_federated::external_lock(THD *thd, int lock_type) } } } - DBUG_RETURN(0); +#endif /* XXX_SUPERCEDED_BY_WL2952 */ + DBUG_RETURN(error); } @@ -3044,4 +3331,3 @@ mysql_declare_plugin(federated) NULL /* config options */ } mysql_declare_plugin_end; - diff --git a/storage/federated/ha_federated.h b/storage/federated/ha_federated.h index bbc2b2fe9f8..40bcf9cc402 100644 --- a/storage/federated/ha_federated.h +++ b/storage/federated/ha_federated.h @@ -43,6 +43,8 @@ The example implements the minimum of what you will probably need. 
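One behavioral detail from the hunk above worth isolating: stash_remote_error() now maps remote duplicate-key failures onto the local handler error code, which is what lets INSERT IGNORE and INSERT ... ON DUPLICATE KEY UPDATE behave correctly across the link. A restatement of that mapping:

    /* Sketch of the translation in stash_remote_error(). */
    int translate_remote_error(unsigned int remote_errno)
    {
      if (remote_errno == ER_DUP_ENTRY || remote_errno == ER_DUP_KEY)
        return HA_ERR_FOUND_DUPP_KEY;
      return HA_FEDERATED_ERROR_WITH_REMOTE_SYSTEM;
    }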
*/ typedef struct st_federated_share { + MEM_ROOT mem_root; + bool parsed; /* this key is unique db/tablename */ const char *share_key; @@ -67,6 +69,7 @@ typedef struct st_federated_share { char *sport; int share_key_length; ushort port; + uint table_name_length, server_name_length, connect_string_length, use_count; pthread_mutex_t mutex; THR_LOCK lock; @@ -85,13 +88,16 @@ class ha_federated: public handler MYSQL_ROW_OFFSET current_position; // Current position used by ::position() int remote_error_number; char remote_error_buf[FEDERATED_QUERY_BUFFER_SIZE]; + bool ignore_duplicates, replace_duplicates; + bool insert_dup_update; + DYNAMIC_STRING bulk_insert; private: /* return 0 on success return errorcode otherwise */ - uint convert_row_to_internal_format(byte *buf, MYSQL_ROW row, + uint convert_row_to_internal_format(uchar *buf, MYSQL_ROW row, MYSQL_RES *result); bool create_where_from_key(String *to, KEY *key_info, const key_range *start_key, @@ -99,6 +105,16 @@ private: bool records_in_range, bool eq_range); int stash_remote_error(); + bool append_stmt_insert(String *query); + + int read_next(uchar *buf, MYSQL_RES *result); + int index_read_idx_with_result_set(uchar *buf, uint index, + const uchar *key, + uint key_len, + ha_rkey_function find_flag, + MYSQL_RES **result); + int real_query(const char *query, uint length); + int real_connect(); public: ha_federated(handlerton *hton, TABLE_SHARE *table_arg); ~ha_federated() {} @@ -125,7 +141,9 @@ public: /* fix server to be able to get remote server table flags */ return (HA_PRIMARY_KEY_IN_READ_INDEX | HA_FILE_BASED | HA_REC_NOT_IN_SEQ | HA_AUTO_PART_KEY | HA_CAN_INDEX_BLOBS | + HA_BINLOG_ROW_CAPABLE | HA_BINLOG_STMT_CAPABLE | HA_NO_PREFIX_CHAR_KEYS | HA_PRIMARY_KEY_REQUIRED_FOR_DELETE | + HA_NO_TRANSACTIONS /* until fixed by WL#2952 */ | HA_PARTIAL_COLUMN_READ | HA_NULL_IN_KEY); } /* @@ -147,6 +165,7 @@ public: uint max_supported_keys() const { return MAX_KEY; } uint max_supported_key_parts() const { return MAX_REF_PARTS; } uint max_supported_key_length() const { return FEDERATED_MAX_KEY_LENGTH; } + uint max_supported_key_part_length() const { return FEDERATED_MAX_KEY_LENGTH; } /* Called in test_quick_select to determine if indexes should be used. Normally, we need to know number of blocks . 
For federated we need to @@ -185,15 +204,17 @@ public: int open(const char *name, int mode, uint test_if_locked); // required int close(void); // required - int write_row(byte *buf); - int update_row(const byte *old_data, byte *new_data); - int delete_row(const byte *buf); + void start_bulk_insert(ha_rows rows); + int end_bulk_insert(); + int write_row(uchar *buf); + int update_row(const uchar *old_data, uchar *new_data); + int delete_row(const uchar *buf); int index_init(uint keynr, bool sorted); - int index_read(byte *buf, const byte *key, + int index_read(uchar *buf, const uchar *key, uint key_len, enum ha_rkey_function find_flag); - int index_read_idx(byte *buf, uint idx, const byte *key, + int index_read_idx(uchar *buf, uint idx, const uchar *key, uint key_len, enum ha_rkey_function find_flag); - int index_next(byte *buf); + int index_next(uchar *buf); int index_end(); int read_range_first(const key_range *start_key, const key_range *end_key, @@ -209,10 +230,11 @@ public: */ int rnd_init(bool scan); //required int rnd_end(); - int rnd_next(byte *buf); //required - int rnd_pos(byte *buf, byte *pos); //required - void position(const byte *record); //required + int rnd_next(uchar *buf); //required + int rnd_pos(uchar *buf, uchar *pos); //required + void position(const uchar *record); //required int info(uint); //required + int extra(ha_extra_function operation); void update_auto_increment(void); int repair(THD* thd, HA_CHECK_OPT* check_opt); @@ -227,18 +249,12 @@ public: THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to, enum thr_lock_type lock_type); //required - virtual bool get_error_message(int error, String *buf); + bool get_error_message(int error, String *buf); int external_lock(THD *thd, int lock_type); int connection_commit(); int connection_rollback(); int connection_autocommit(bool state); int execute_simple_query(const char *query, int len); - - int read_next(byte *buf, MYSQL_RES *result); - int index_read_idx_with_result_set(byte *buf, uint index, - const byte *key, - uint key_len, - ha_rkey_function find_flag, - MYSQL_RES **result); + int reset(void); }; diff --git a/storage/heap/CMakeLists.txt b/storage/heap/CMakeLists.txt index 39953684b8f..fd3ce149b2c 100644..100755 --- a/storage/heap/CMakeLists.txt +++ b/storage/heap/CMakeLists.txt @@ -20,8 +20,13 @@ INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include ${CMAKE_SOURCE_DIR}/zlib ${CMAKE_SOURCE_DIR}/sql ${CMAKE_SOURCE_DIR}/regex ${CMAKE_SOURCE_DIR}/extra/yassl/include) -ADD_LIBRARY(heap _check.c _rectest.c hp_block.c hp_clear.c hp_close.c hp_create.c + +SET(HEAP_SOURCES _check.c _rectest.c hp_block.c hp_clear.c hp_close.c hp_create.c ha_heap.cc hp_delete.c hp_extra.c hp_hash.c hp_info.c hp_open.c hp_panic.c hp_rename.c hp_rfirst.c hp_rkey.c hp_rlast.c hp_rnext.c hp_rprev.c hp_rrnd.c hp_rsame.c hp_scan.c hp_static.c hp_update.c hp_write.c) + +IF(NOT SOURCE_SUBLIBS) + ADD_LIBRARY(heap ${HEAP_SOURCES}) +ENDIF(NOT SOURCE_SUBLIBS) diff --git a/storage/heap/_check.c b/storage/heap/_check.c index 05f12dade0d..08b6da62ae1 100644 --- a/storage/heap/_check.c +++ b/storage/heap/_check.c @@ -167,7 +167,7 @@ static int check_one_rb_key(HP_INFO *info, uint keynr, ulong records, HP_KEYDEF *keydef= info->s->keydef + keynr; int error= 0; ulong found= 0; - byte *key, *recpos; + uchar *key, *recpos; uint key_length; uint not_used[2]; @@ -176,7 +176,7 @@ static int check_one_rb_key(HP_INFO *info, uint keynr, ulong records, { do { - memcpy(&recpos, key + (*keydef->get_key_length)(keydef,key), sizeof(byte*)); + memcpy(&recpos, key + 
(*keydef->get_key_length)(keydef,key), sizeof(uchar*));
       key_length= hp_rb_make_key(keydef, info->recbuf, recpos, 0);
       if (ha_key_cmp(keydef->seg, (uchar*) info->recbuf, (uchar*) key,
                      key_length, SEARCH_FIND | SEARCH_SAME, not_used))
diff --git a/storage/heap/_rectest.c b/storage/heap/_rectest.c
index 2fd2d39bed7..068fedf719c 100644
--- a/storage/heap/_rectest.c
+++ b/storage/heap/_rectest.c
@@ -18,7 +18,7 @@
 
 #include "heapdef.h"
 
-int hp_rectest(register HP_INFO *info, register const byte *old)
+int hp_rectest(register HP_INFO *info, register const uchar *old)
 {
   DBUG_ENTER("hp_rectest");
 
diff --git a/storage/heap/ha_heap.cc b/storage/heap/ha_heap.cc
index 7a2f8e20c56..4934792de80 100644
--- a/storage/heap/ha_heap.cc
+++ b/storage/heap/ha_heap.cc
@@ -22,7 +22,7 @@
 #include "mysql_priv.h"
 #include <mysql/plugin.h>
 #include "ha_heap.h"
-
+#include "heapdef.h"
 
 static handler *heap_create_handler(handlerton *hton,
                                     TABLE_SHARE *table,
@@ -61,8 +61,8 @@ static handler *heap_create_handler(handlerton *hton,
 *****************************************************************************/
 
 ha_heap::ha_heap(handlerton *hton, TABLE_SHARE *table_arg)
-  :handler(hton, table_arg), file(0), records_changed(0),
-  key_stat_version(0)
+  :handler(hton, table_arg), file(0), records_changed(0), internal_table(0),
+  key_stat_version(0)
 {}
 
 
@@ -90,13 +90,25 @@ const char **ha_heap::bas_ext() const
 
 int ha_heap::open(const char *name, int mode, uint test_if_locked)
 {
-  if (!(file= heap_open(name, mode)) && my_errno == ENOENT)
+  if ((test_if_locked & HA_OPEN_INTERNAL_TABLE) ||
+      !(file= heap_open(name, mode)) && my_errno == ENOENT)
   {
     HA_CREATE_INFO create_info;
+    internal_table= test(test_if_locked & HA_OPEN_INTERNAL_TABLE);
     bzero(&create_info, sizeof(create_info));
+    file= 0;
    if (!create(name, table, &create_info))
    {
-      file= heap_open(name, mode);
+      file= internal_table ?
+            heap_open_from_share(internal_share, mode) :
+            heap_open_from_share_and_register(internal_share, mode);
+      if (!file)
+      {
+        /* Couldn't open the table; remove the newly created table */
+        pthread_mutex_lock(&THR_LOCK_heap);
+        hp_free(internal_share);
+        pthread_mutex_unlock(&THR_LOCK_heap);
+      }
       implicit_emptied= 1;
     }
   }
@@ -120,7 +132,27 @@ int ha_heap::open(const char *name, int mode, uint test_if_locked)
 
 int ha_heap::close(void)
 {
-  return heap_close(file);
+  return internal_table ? hp_close(file) : heap_close(file);
+}
+
+
+/*
+  Create a copy of this table
+
+  DESCRIPTION
+    Do the same as the default implementation but use file->s->name
+    instead of table->s->path. This is needed on Windows, where the
+    clone() call sees a '/'-delimited path in table->s->path, while
+    ha_heap::open() was called with a '\'-delimited path.
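+
+    For example (hypothetical paths), the share may have been created by
+    an open() call that used ".\test\t1" while table->s->path later
+    yields "./test/t1"; opening through file->s->name lets the clone
+    resolve to the same HP_SHARE regardless of the delimiter.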
+*/ + +handler *ha_heap::clone(MEM_ROOT *mem_root) +{ + handler *new_handler= get_new_handler(table->s, mem_root, table->s->db_type()); + if (new_handler && !new_handler->ha_open(table, file->s->name, table->db_stat, + HA_OPEN_IGNORE_IF_LOCKED)) + return new_handler; + return NULL; /* purecov: inspected */ } @@ -178,10 +210,10 @@ void ha_heap::update_key_stats() } -int ha_heap::write_row(byte * buf) +int ha_heap::write_row(uchar * buf) { int res; - statistic_increment(table->in_use->status_var.ha_write_count,&LOCK_status); + ha_statistic_increment(&SSV::ha_write_count); if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT) table->timestamp_field->set_time(); if (table->next_number_field && buf == table->record[0]) @@ -202,10 +234,10 @@ int ha_heap::write_row(byte * buf) return res; } -int ha_heap::update_row(const byte * old_data, byte * new_data) +int ha_heap::update_row(const uchar * old_data, uchar * new_data) { int res; - statistic_increment(table->in_use->status_var.ha_update_count,&LOCK_status); + ha_statistic_increment(&SSV::ha_update_count); if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE) table->timestamp_field->set_time(); res= heap_update(file,old_data,new_data); @@ -221,10 +253,10 @@ int ha_heap::update_row(const byte * old_data, byte * new_data) return res; } -int ha_heap::delete_row(const byte * buf) +int ha_heap::delete_row(const uchar * buf) { int res; - statistic_increment(table->in_use->status_var.ha_delete_count,&LOCK_status); + ha_statistic_increment(&SSV::ha_delete_count); res= heap_delete(file,buf); if (!res && table->s->tmp_table == NO_TMP_TABLE && ++records_changed*HEAP_STATS_UPDATE_THRESHOLD > file->s->records) @@ -238,73 +270,69 @@ int ha_heap::delete_row(const byte * buf) return res; } -int ha_heap::index_read(byte * buf, const byte * key, uint key_len, - enum ha_rkey_function find_flag) +int ha_heap::index_read_map(uchar *buf, const uchar *key, + key_part_map keypart_map, + enum ha_rkey_function find_flag) { DBUG_ASSERT(inited==INDEX); - statistic_increment(table->in_use->status_var.ha_read_key_count, - &LOCK_status); - int error = heap_rkey(file,buf,active_index, key, key_len, find_flag); + ha_statistic_increment(&SSV::ha_read_key_count); + int error = heap_rkey(file,buf,active_index, key, keypart_map, find_flag); table->status = error ? STATUS_NOT_FOUND : 0; return error; } -int ha_heap::index_read_last(byte *buf, const byte *key, uint key_len) +int ha_heap::index_read_last_map(uchar *buf, const uchar *key, + key_part_map keypart_map) { DBUG_ASSERT(inited==INDEX); - statistic_increment(table->in_use->status_var.ha_read_key_count, - &LOCK_status); - int error= heap_rkey(file, buf, active_index, key, key_len, + ha_statistic_increment(&SSV::ha_read_key_count); + int error= heap_rkey(file, buf, active_index, key, keypart_map, HA_READ_PREFIX_LAST); table->status= error ? STATUS_NOT_FOUND : 0; return error; } -int ha_heap::index_read_idx(byte * buf, uint index, const byte * key, - uint key_len, enum ha_rkey_function find_flag) +int ha_heap::index_read_idx_map(uchar *buf, uint index, const uchar *key, + key_part_map keypart_map, + enum ha_rkey_function find_flag) { - statistic_increment(table->in_use->status_var.ha_read_key_count, - &LOCK_status); - int error = heap_rkey(file, buf, index, key, key_len, find_flag); + ha_statistic_increment(&SSV::ha_read_key_count); + int error = heap_rkey(file, buf, index, key, keypart_map, find_flag); table->status = error ? 
STATUS_NOT_FOUND : 0; return error; } -int ha_heap::index_next(byte * buf) +int ha_heap::index_next(uchar * buf) { DBUG_ASSERT(inited==INDEX); - statistic_increment(table->in_use->status_var.ha_read_next_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_next_count); int error=heap_rnext(file,buf); table->status=error ? STATUS_NOT_FOUND: 0; return error; } -int ha_heap::index_prev(byte * buf) +int ha_heap::index_prev(uchar * buf) { DBUG_ASSERT(inited==INDEX); - statistic_increment(table->in_use->status_var.ha_read_prev_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_prev_count); int error=heap_rprev(file,buf); table->status=error ? STATUS_NOT_FOUND: 0; return error; } -int ha_heap::index_first(byte * buf) +int ha_heap::index_first(uchar * buf) { DBUG_ASSERT(inited==INDEX); - statistic_increment(table->in_use->status_var.ha_read_first_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_first_count); int error=heap_rfirst(file, buf, active_index); table->status=error ? STATUS_NOT_FOUND: 0; return error; } -int ha_heap::index_last(byte * buf) +int ha_heap::index_last(uchar * buf) { DBUG_ASSERT(inited==INDEX); - statistic_increment(table->in_use->status_var.ha_read_last_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_last_count); int error=heap_rlast(file, buf, active_index); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -315,47 +343,45 @@ int ha_heap::rnd_init(bool scan) return scan ? heap_scan_init(file) : 0; } -int ha_heap::rnd_next(byte *buf) +int ha_heap::rnd_next(uchar *buf) { - statistic_increment(table->in_use->status_var.ha_read_rnd_next_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_rnd_next_count); int error=heap_scan(file, buf); table->status=error ? STATUS_NOT_FOUND: 0; return error; } -int ha_heap::rnd_pos(byte * buf, byte *pos) +int ha_heap::rnd_pos(uchar * buf, uchar *pos) { int error; - HEAP_PTR position; - statistic_increment(table->in_use->status_var.ha_read_rnd_count, - &LOCK_status); - memcpy_fixed((char*) &position,pos,sizeof(HEAP_PTR)); - error=heap_rrnd(file, buf, position); + HEAP_PTR heap_position; + ha_statistic_increment(&SSV::ha_read_rnd_count); + memcpy_fixed((char*) &heap_position, pos, sizeof(HEAP_PTR)); + error=heap_rrnd(file, buf, heap_position); table->status=error ? 
STATUS_NOT_FOUND: 0; return error; } -void ha_heap::position(const byte *record) +void ha_heap::position(const uchar *record) { *(HEAP_PTR*) ref= heap_position(file); // Ref is aligned } int ha_heap::info(uint flag) { - HEAPINFO info; - (void) heap_info(file,&info,flag); - - errkey= info.errkey; - stats.records = info.records; - stats.deleted = info.deleted; - stats.mean_rec_length=info.reclength; - stats.data_file_length=info.data_length; - stats.index_file_length=info.index_length; - stats.max_data_file_length= info.max_records* info.reclength; - stats.delete_length= info.deleted * info.reclength; + HEAPINFO hp_info; + (void) heap_info(file,&hp_info,flag); + + errkey= hp_info.errkey; + stats.records= hp_info.records; + stats.deleted= hp_info.deleted; + stats.mean_rec_length= hp_info.reclength; + stats.data_file_length= hp_info.data_length; + stats.index_file_length= hp_info.index_length; + stats.max_data_file_length= hp_info.max_records * hp_info.reclength; + stats.delete_length= hp_info.deleted * hp_info.reclength; if (flag & HA_STATUS_AUTO) - stats.auto_increment_value= info.auto_increment; + stats.auto_increment_value= hp_info.auto_increment; /* If info() is called for the first time after open(), we will still have to update the key statistics. Hoping that a table lock is now @@ -530,7 +556,7 @@ int ha_heap::delete_table(const char *name) void ha_heap::drop_table(const char *name) { - heap_drop_table(file); + file->s->delete_on_close= 1; close(); } @@ -627,7 +653,10 @@ int ha_heap::create(const char *name, TABLE *table_arg, seg->length= (uint) key_part->length; seg->flag= key_part->key_part_flag; - seg->charset= field->charset(); + if (field->flags & (ENUM_FLAG | SET_FLAG)) + seg->charset= &my_charset_bin; + else + seg->charset= field->charset(); if (field->null_ptr) { seg->null_bit= field->null_bit; @@ -666,16 +695,16 @@ int ha_heap::create(const char *name, TABLE *table_arg, create_info->auto_increment_value - 1 : 0); hp_create_info.max_table_size=current_thd->variables.max_heap_table_size; hp_create_info.with_auto_increment= found_real_auto_increment; + hp_create_info.internal_table= internal_table; max_rows = (ha_rows) (hp_create_info.max_table_size / mem_per_row); error= heap_create(name, keys, keydef, share->reclength, (ulong) ((share->max_rows < max_rows && share->max_rows) ? 
share->max_rows : max_rows), - (ulong) share->min_rows, &hp_create_info); - my_free((gptr) keydef, MYF(0)); - if (file) - info(HA_STATUS_NO_LOCK | HA_STATUS_CONST | HA_STATUS_VARIABLE); + (ulong) share->min_rows, &hp_create_info, &internal_share); + my_free((uchar*) keydef, MYF(0)); + DBUG_ASSERT(file == 0); return (error); } @@ -703,9 +732,10 @@ bool ha_heap::check_if_incompatible_data(HA_CREATE_INFO *info, uint table_changes) { /* Check that auto_increment value was not changed */ - if ((table_changes != IS_EQUAL_YES && - info->used_fields & HA_CREATE_USED_AUTO) && - info->auto_increment_value != 0) + if ((info->used_fields & HA_CREATE_USED_AUTO && + info->auto_increment_value != 0) || + table_changes == IS_EQUAL_NO || + table_changes & IS_EQUAL_PACK_LENGTH) // Not implemented yet return COMPATIBLE_DATA_NO; return COMPATIBLE_DATA_YES; } diff --git a/storage/heap/ha_heap.h b/storage/heap/ha_heap.h index 2de80c76999..5c5ad43658e 100644 --- a/storage/heap/ha_heap.h +++ b/storage/heap/ha_heap.h @@ -25,13 +25,16 @@ class ha_heap: public handler { HP_INFO *file; + HP_SHARE *internal_share; key_map btree_keys; /* number of records changed since last statistics update */ uint records_changed; uint key_stat_version; + my_bool internal_table; public: ha_heap(handlerton *hton, TABLE_SHARE *table); ~ha_heap() {} + handler *clone(MEM_ROOT *mem_root); const char *table_type() const { return (table->in_use->variables.sql_mode & MODE_MYSQL323) ? @@ -48,14 +51,15 @@ public: ulonglong table_flags() const { return (HA_FAST_KEY_READ | HA_NO_BLOBS | HA_NULL_IN_KEY | + HA_BINLOG_ROW_CAPABLE | HA_BINLOG_STMT_CAPABLE | HA_REC_NOT_IN_SEQ | HA_CAN_INSERT_DELAYED | HA_NO_TRANSACTIONS | HA_HAS_RECORDS | HA_STATS_RECORDS_IS_EXACT); } ulong index_flags(uint inx, uint part, bool all_parts) const { return ((table_share->key_info[inx].algorithm == HA_KEY_ALG_BTREE) ? 
- HA_READ_NEXT | HA_READ_PREV | HA_READ_ORDER | HA_READ_RANGE : - HA_ONLY_WHOLE_INDEX); + HA_READ_NEXT | HA_READ_PREV | HA_READ_ORDER | HA_READ_RANGE : + HA_ONLY_WHOLE_INDEX | HA_KEY_SCAN_NOT_ROR); } const key_map *keys_to_use_for_scanning() { return &btree_keys; } uint max_supported_keys() const { return MAX_KEY; } @@ -68,26 +72,27 @@ public: int open(const char *name, int mode, uint test_if_locked); int close(void); void set_keys_for_scanning(void); - int write_row(byte * buf); - int update_row(const byte * old_data, byte * new_data); - int delete_row(const byte * buf); + int write_row(uchar * buf); + int update_row(const uchar * old_data, uchar * new_data); + int delete_row(const uchar * buf); virtual void get_auto_increment(ulonglong offset, ulonglong increment, ulonglong nb_desired_values, ulonglong *first_value, ulonglong *nb_reserved_values); - int index_read(byte * buf, const byte * key, - uint key_len, enum ha_rkey_function find_flag); - int index_read_idx(byte * buf, uint idx, const byte * key, - uint key_len, enum ha_rkey_function find_flag); - int index_read_last(byte * buf, const byte * key, uint key_len); - int index_next(byte * buf); - int index_prev(byte * buf); - int index_first(byte * buf); - int index_last(byte * buf); + int index_read_map(uchar * buf, const uchar * key, key_part_map keypart_map, + enum ha_rkey_function find_flag); + int index_read_last_map(uchar *buf, const uchar *key, key_part_map keypart_map); + int index_read_idx_map(uchar * buf, uint index, const uchar * key, + key_part_map keypart_map, + enum ha_rkey_function find_flag); + int index_next(uchar * buf); + int index_prev(uchar * buf); + int index_first(uchar * buf); + int index_last(uchar * buf); int rnd_init(bool scan); - int rnd_next(byte *buf); - int rnd_pos(byte * buf, byte *pos); - void position(const byte *record); + int rnd_next(uchar *buf); + int rnd_pos(uchar * buf, uchar *pos); + void position(const uchar *record); int info(uint); int extra(enum ha_extra_function operation); int reset(); @@ -105,11 +110,9 @@ public: THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to, enum thr_lock_type lock_type); - int cmp_ref(const byte *ref1, const byte *ref2) + int cmp_ref(const uchar *ref1, const uchar *ref2) { - HEAP_PTR ptr1=*(HEAP_PTR*)ref1; - HEAP_PTR ptr2=*(HEAP_PTR*)ref2; - return ptr1 < ptr2? -1 : (ptr1 > ptr2? 
1 : 0); + return memcmp(ref1, ref2, sizeof(HEAP_PTR)); } bool check_if_incompatible_data(HA_CREATE_INFO *info, uint table_changes); private: diff --git a/storage/heap/heapdef.h b/storage/heap/heapdef.h index 016c83db8e0..3fc94062303 100644 --- a/storage/heap/heapdef.h +++ b/storage/heap/heapdef.h @@ -16,6 +16,7 @@ /* This file is included in all heap-files */ #include <my_base.h> /* This includes global */ +C_MODE_START #ifdef THREAD #include <my_pthread.h> #endif @@ -48,7 +49,7 @@ if (!(info->update & HA_STATE_AKTIV))\ typedef struct st_hp_hash_info { struct st_hp_hash_info *next_key; - byte *ptr_to_rec; + uchar *ptr_to_rec; } HASH_INFO; typedef struct { @@ -60,50 +61,51 @@ typedef struct { /* Prototypes for intern functions */ extern HP_SHARE *hp_find_named_heap(const char *name); -extern int hp_rectest(HP_INFO *info,const byte *old); -extern byte *hp_find_block(HP_BLOCK *info,ulong pos); -extern int hp_get_new_block(HP_BLOCK *info, ulong* alloc_length); +extern int hp_rectest(HP_INFO *info,const uchar *old); +extern uchar *hp_find_block(HP_BLOCK *info,ulong pos); +extern int hp_get_new_block(HP_BLOCK *info, size_t* alloc_length); extern void hp_free(HP_SHARE *info); -extern byte *hp_free_level(HP_BLOCK *block,uint level,HP_PTRS *pos, - byte *last_pos); +extern uchar *hp_free_level(HP_BLOCK *block,uint level,HP_PTRS *pos, + uchar *last_pos); extern int hp_write_key(HP_INFO *info, HP_KEYDEF *keyinfo, - const byte *record, byte *recpos); + const uchar *record, uchar *recpos); extern int hp_rb_write_key(HP_INFO *info, HP_KEYDEF *keyinfo, - const byte *record, byte *recpos); + const uchar *record, uchar *recpos); extern int hp_rb_delete_key(HP_INFO *info,HP_KEYDEF *keyinfo, - const byte *record,byte *recpos,int flag); + const uchar *record,uchar *recpos,int flag); extern int hp_delete_key(HP_INFO *info,HP_KEYDEF *keyinfo, - const byte *record,byte *recpos,int flag); + const uchar *record,uchar *recpos,int flag); extern HASH_INFO *_heap_find_hash(HP_BLOCK *block,ulong pos); -extern byte *hp_search(HP_INFO *info,HP_KEYDEF *keyinfo,const byte *key, +extern uchar *hp_search(HP_INFO *info,HP_KEYDEF *keyinfo,const uchar *key, uint nextflag); -extern byte *hp_search_next(HP_INFO *info, HP_KEYDEF *keyinfo, - const byte *key, HASH_INFO *pos); -extern ulong hp_hashnr(HP_KEYDEF *keyinfo,const byte *key); -extern ulong hp_rec_hashnr(HP_KEYDEF *keyinfo,const byte *rec); +extern uchar *hp_search_next(HP_INFO *info, HP_KEYDEF *keyinfo, + const uchar *key, HASH_INFO *pos); +extern ulong hp_hashnr(HP_KEYDEF *keyinfo,const uchar *key); +extern ulong hp_rec_hashnr(HP_KEYDEF *keyinfo,const uchar *rec); extern ulong hp_mask(ulong hashnr,ulong buffmax,ulong maxlength); extern void hp_movelink(HASH_INFO *pos,HASH_INFO *next_link, HASH_INFO *newlink); -extern int hp_rec_key_cmp(HP_KEYDEF *keydef,const byte *rec1, - const byte *rec2, +extern int hp_rec_key_cmp(HP_KEYDEF *keydef,const uchar *rec1, + const uchar *rec2, my_bool diff_if_only_endspace_difference); -extern int hp_key_cmp(HP_KEYDEF *keydef,const byte *rec, - const byte *key); -extern void hp_make_key(HP_KEYDEF *keydef,byte *key,const byte *rec); -extern uint hp_rb_make_key(HP_KEYDEF *keydef, byte *key, - const byte *rec, byte *recpos); -extern uint hp_rb_key_length(HP_KEYDEF *keydef, const byte *key); -extern uint hp_rb_null_key_length(HP_KEYDEF *keydef, const byte *key); -extern uint hp_rb_var_key_length(HP_KEYDEF *keydef, const byte *key); -extern my_bool hp_if_null_in_key(HP_KEYDEF *keyinfo, const byte *record); +extern int hp_key_cmp(HP_KEYDEF 
*keydef,const uchar *rec, + const uchar *key); +extern void hp_make_key(HP_KEYDEF *keydef,uchar *key,const uchar *rec); +extern uint hp_rb_make_key(HP_KEYDEF *keydef, uchar *key, + const uchar *rec, uchar *recpos); +extern uint hp_rb_key_length(HP_KEYDEF *keydef, const uchar *key); +extern uint hp_rb_null_key_length(HP_KEYDEF *keydef, const uchar *key); +extern uint hp_rb_var_key_length(HP_KEYDEF *keydef, const uchar *key); +extern my_bool hp_if_null_in_key(HP_KEYDEF *keyinfo, const uchar *record); extern int hp_close(register HP_INFO *info); extern void hp_clear(HP_SHARE *info); extern void hp_clear_keys(HP_SHARE *info); -extern uint hp_rb_pack_key(HP_KEYDEF *keydef, uchar *key, const uchar *old, - uint k_len); +extern uint hp_rb_pack_key(HP_KEYDEF *keydef, uchar *key, const uchar *old, + key_part_map keypart_map); #ifdef THREAD extern pthread_mutex_t THR_LOCK_heap; #else #define pthread_mutex_lock(A) #define pthread_mutex_unlock(A) #endif +C_MODE_END diff --git a/storage/heap/hp_block.c b/storage/heap/hp_block.c index 85219380287..c622a9e52f8 100644 --- a/storage/heap/hp_block.c +++ b/storage/heap/hp_block.c @@ -26,7 +26,7 @@ {p_0, p_1, ...} serve as indexes to descend the blocks tree. */ -byte *hp_find_block(HP_BLOCK *block, ulong pos) +uchar *hp_find_block(HP_BLOCK *block, ulong pos) { reg1 int i; reg3 HP_PTRS *ptr; /* block base ptr */ @@ -36,12 +36,13 @@ byte *hp_find_block(HP_BLOCK *block, ulong pos) ptr=(HP_PTRS*)ptr->blocks[pos/block->level_info[i].records_under_level]; pos%=block->level_info[i].records_under_level; } - return (byte*) ptr+ pos*block->recbuffer; + return (uchar*) ptr+ pos*block->recbuffer; } /* Get one new block-of-records. Alloc ptr to block if needed + SYNOPSIS hp_get_new_block() block HP_BLOCK tree-like block @@ -53,7 +54,7 @@ byte *hp_find_block(HP_BLOCK *block, ulong pos) 1 Out of memory */ -int hp_get_new_block(HP_BLOCK *block, ulong *alloc_length) +int hp_get_new_block(HP_BLOCK *block, size_t *alloc_length) { reg1 uint i,j; HP_PTRS *root; @@ -101,13 +102,13 @@ int hp_get_new_block(HP_BLOCK *block, ulong *alloc_length) /* Occupy the free slot we've found at level i */ block->level_info[i].last_blocks-> blocks[HP_PTRS_IN_NOD - block->level_info[i].free_ptrs_in_block--]= - (byte*) root; + (uchar*) root; /* Add a block subtree with each node having one left-most child */ for (j=i-1 ; j >0 ; j--) { block->level_info[j].last_blocks= root++; - block->level_info[j].last_blocks->blocks[0]=(byte*) root; + block->level_info[j].last_blocks->blocks[0]=(uchar*) root; block->level_info[j].free_ptrs_in_block=HP_PTRS_IN_NOD-1; } @@ -124,27 +125,27 @@ int hp_get_new_block(HP_BLOCK *block, ulong *alloc_length) /* free all blocks under level */ -byte *hp_free_level(HP_BLOCK *block, uint level, HP_PTRS *pos, byte *last_pos) +uchar *hp_free_level(HP_BLOCK *block, uint level, HP_PTRS *pos, uchar *last_pos) { int i,max_pos; - byte *next_ptr; + uchar *next_ptr; if (level == 1) - next_ptr=(byte*) pos+block->recbuffer; + next_ptr=(uchar*) pos+block->recbuffer; else { max_pos= (block->level_info[level-1].last_blocks == pos) ? 
HP_PTRS_IN_NOD - block->level_info[level-1].free_ptrs_in_block : HP_PTRS_IN_NOD; - next_ptr=(byte*) (pos+1); + next_ptr=(uchar*) (pos+1); for (i=0 ; i < max_pos ; i++) next_ptr=hp_free_level(block,level-1, (HP_PTRS*) pos->blocks[i],next_ptr); } - if ((byte*) pos != last_pos) + if ((uchar*) pos != last_pos) { - my_free((gptr) pos,MYF(0)); + my_free((uchar*) pos,MYF(0)); return last_pos; } return next_ptr; /* next memory position */ diff --git a/storage/heap/hp_clear.c b/storage/heap/hp_clear.c index 2d8b8b394d5..babfcbd6f41 100644 --- a/storage/heap/hp_clear.c +++ b/storage/heap/hp_clear.c @@ -32,7 +32,7 @@ void hp_clear(HP_SHARE *info) if (info->block.levels) VOID(hp_free_level(&info->block,info->block.levels,info->block.root, - (byte*) 0)); + (uchar*) 0)); info->block.levels=0; hp_clear_keys(info); info->records= info->deleted= 0; @@ -94,7 +94,7 @@ void hp_clear_keys(HP_SHARE *info) { HP_BLOCK *block= &keyinfo->block; if (block->levels) - VOID(hp_free_level(block,block->levels,block->root,(byte*) 0)); + VOID(hp_free_level(block,block->levels,block->root,(uchar*) 0)); block->levels=0; block->last_allocated=0; keyinfo->hash_buckets= 0; diff --git a/storage/heap/hp_close.c b/storage/heap/hp_close.c index 5f6fc3249b5..d571815980c 100644 --- a/storage/heap/hp_close.c +++ b/storage/heap/hp_close.c @@ -42,9 +42,10 @@ int hp_close(register HP_INFO *info) } #endif info->s->changed=0; - heap_open_list=list_delete(heap_open_list,&info->open_list); + if (info->open_list.data) + heap_open_list=list_delete(heap_open_list,&info->open_list); if (!--info->s->open_count && info->s->delete_on_close) hp_free(info->s); /* Table was deleted */ - my_free((gptr) info,MYF(0)); + my_free((uchar*) info,MYF(0)); DBUG_RETURN(error); } diff --git a/storage/heap/hp_create.c b/storage/heap/hp_create.c index 4e1347966b9..b6814fc1614 100644 --- a/storage/heap/hp_create.c +++ b/storage/heap/hp_create.c @@ -19,33 +19,37 @@ static int keys_compare(heap_rb_param *param, uchar *key1, uchar *key2); static void init_block(HP_BLOCK *block,uint reclength,ulong min_records, ulong max_records); +/* Create a heap table */ + int heap_create(const char *name, uint keys, HP_KEYDEF *keydef, uint reclength, ulong max_records, ulong min_records, - HP_CREATE_INFO *create_info) + HP_CREATE_INFO *create_info, HP_SHARE **res) { uint i, j, key_segs, max_length, length; - HP_SHARE *share; + HP_SHARE *share= 0; HA_KEYSEG *keyseg; - DBUG_ENTER("heap_create"); - pthread_mutex_lock(&THR_LOCK_heap); - if ((share= hp_find_named_heap(name)) && share->open_count == 0) + if (!create_info->internal_table) { - hp_free(share); - share= NULL; - } - + pthread_mutex_lock(&THR_LOCK_heap); + if ((share= hp_find_named_heap(name)) && share->open_count == 0) + { + hp_free(share); + share= 0; + } + } + if (!share) { HP_KEYDEF *keyinfo; DBUG_PRINT("info",("Initializing new table")); /* - We have to store sometimes byte* del_link in records, - so the record length should be at least sizeof(byte*) + We have to store sometimes uchar* del_link in records, + so the record length should be at least sizeof(uchar*) */ - set_if_bigger(reclength, sizeof (byte*)); + set_if_bigger(reclength, sizeof (uchar*)); for (i= key_segs= max_length= 0, keyinfo= keydef; i < keys; i++, keyinfo++) { @@ -112,7 +116,7 @@ int heap_create(const char *name, uint keys, HP_KEYDEF *keydef, } keyinfo->length= length; length+= keyinfo->rb_tree.size_of_element + - ((keyinfo->algorithm == HA_KEY_ALG_BTREE) ? sizeof(byte*) : 0); + ((keyinfo->algorithm == HA_KEY_ALG_BTREE) ? 
sizeof(uchar*) : 0);
     if (length > max_length)
       max_length= length;
     key_segs+= keyinfo->keysegs;
@@ -131,10 +135,7 @@
                                        keys*sizeof(HP_KEYDEF)+
                                        key_segs*sizeof(HA_KEYSEG),
                                        MYF(MY_ZEROFILL))))
-    {
-      pthread_mutex_unlock(&THR_LOCK_heap);
-      DBUG_RETURN(1);
-    }
+      goto err;
     share->keydef= (HP_KEYDEF*) (share + 1);
     share->key_stat_version= 1;
     keyseg= (HA_KEYSEG*) (share->keydef + keys);
@@ -152,12 +153,12 @@
       {
         /* additional HA_KEYTYPE_END keyseg */
         keyseg->type=     HA_KEYTYPE_END;
-        keyseg->length=   sizeof(byte*);
+        keyseg->length=   sizeof(uchar*);
         keyseg->flag=     0;
         keyseg->null_bit= 0;
         keyseg++;
 
-        init_tree(&keyinfo->rb_tree, 0, 0, sizeof(byte*),
+        init_tree(&keyinfo->rb_tree, 0, 0, sizeof(uchar*),
                   (qsort_cmp2)keys_compare, 1, NULL, NULL);
         keyinfo->delete_key= hp_rb_delete_key;
         keyinfo->write_key= hp_rb_write_key;
@@ -188,21 +189,34 @@
     /* Must be allocated separately for rename to work */
     if (!(share->name= my_strdup(name,MYF(0))))
     {
-      my_free((gptr) share,MYF(0));
-      pthread_mutex_unlock(&THR_LOCK_heap);
-      DBUG_RETURN(1);
+      my_free((uchar*) share,MYF(0));
+      goto err;
     }
 #ifdef THREAD
     thr_lock_init(&share->lock);
     VOID(pthread_mutex_init(&share->intern_lock,MY_MUTEX_INIT_FAST));
 #endif
-    share->open_list.data= (void*) share;
-    heap_share_list= list_add(heap_share_list,&share->open_list);
+    if (!create_info->internal_table)
+    {
+      share->open_list.data= (void*) share;
+      heap_share_list= list_add(heap_share_list,&share->open_list);
+    }
+    else
+      share->delete_on_close= 1;
   }
-  pthread_mutex_unlock(&THR_LOCK_heap);
+  if (!create_info->internal_table)
+    pthread_mutex_unlock(&THR_LOCK_heap);
+
+  *res= share;
   DBUG_RETURN(0);
+
+err:
+  if (!create_info->internal_table)
+    pthread_mutex_unlock(&THR_LOCK_heap);
+  DBUG_RETURN(1);
 } /* heap_create */
 
+
 static int keys_compare(heap_rb_param *param, uchar *key1, uchar *key2)
 {
   uint not_used[2];
@@ -218,7 +232,7 @@ static void init_block(HP_BLOCK *block, uint reclength, ulong min_records,
   max_records= max(min_records,max_records);
   if (!max_records)
     max_records= 1000;                  /* As good a guess as anything */
-  recbuffer= (uint) (reclength + sizeof(byte**) - 1) & ~(sizeof(byte**) - 1);
+  recbuffer= (uint) (reclength + sizeof(uchar**) - 1) & ~(sizeof(uchar**) - 1);
   records_in_block= max_records / 10;
   if (records_in_block < 10 && max_records)
     records_in_block= 10;
@@ -279,13 +293,14 @@ void heap_drop_table(HP_INFO *info)
 
 void hp_free(HP_SHARE *share)
 {
-  heap_share_list= list_delete(heap_share_list, &share->open_list);
+  if (share->open_list.data)                    /* If not internal table */
+    heap_share_list= list_delete(heap_share_list, &share->open_list);
   hp_clear(share);                      /* Remove blocks from memory */
 #ifdef THREAD
   thr_lock_delete(&share->lock);
   VOID(pthread_mutex_destroy(&share->intern_lock));
 #endif
-  my_free((gptr) share->name, MYF(0));
-  my_free((gptr) share, MYF(0));
+  my_free((uchar*) share->name, MYF(0));
+  my_free((uchar*) share, MYF(0));
   return;
 }
diff --git a/storage/heap/hp_delete.c b/storage/heap/hp_delete.c
index 637e5f1a497..1dd79a42e0b 100644
--- a/storage/heap/hp_delete.c
+++ b/storage/heap/hp_delete.c
@@ -17,9 +17,9 @@
 
 #include "heapdef.h"
 
-int heap_delete(HP_INFO *info, const byte *record)
+int heap_delete(HP_INFO *info, const uchar *record)
 {
-  byte *pos;
+  uchar *pos;
   HP_SHARE *share=info->s;
   HP_KEYDEF *keydef, *end, *p_lastinx;
   DBUG_ENTER("heap_delete");
@@ -43,7 +43,7 @@ int
heap_delete(HP_INFO *info, const byte *record) } info->update=HA_STATE_DELETED; - *((byte**) pos)=share->del_link; + *((uchar**) pos)=share->del_link; share->del_link=pos; pos[share->reclength]=0; /* Record deleted */ share->deleted++; @@ -65,7 +65,7 @@ err: */ int hp_rb_delete_key(HP_INFO *info, register HP_KEYDEF *keyinfo, - const byte *record, byte *recpos, int flag) + const uchar *record, uchar *recpos, int flag) { heap_rb_param custom_arg; uint old_allocated; @@ -105,7 +105,7 @@ int hp_rb_delete_key(HP_INFO *info, register HP_KEYDEF *keyinfo, */ int hp_delete_key(HP_INFO *info, register HP_KEYDEF *keyinfo, - const byte *record, byte *recpos, int flag) + const uchar *record, uchar *recpos, int flag) { ulong blength,pos2,pos_hashnr,lastpos_hashnr; HASH_INFO *lastpos,*gpos,*pos,*pos3,*empty,*last_ptr; diff --git a/storage/heap/hp_hash.c b/storage/heap/hp_hash.c index c5a30a3ef65..aaaa0fe833f 100644 --- a/storage/heap/hp_hash.c +++ b/storage/heap/hp_hash.c @@ -35,7 +35,7 @@ HA_READ_KEY_EXACT Include the key in the range HA_READ_AFTER_KEY Don't include key in range - max_key.flag can have one of the following values: + max_key.flag can have one of the following values: HA_READ_BEFORE_KEY Don't include key in range HA_READ_AFTER_KEY Include all 'end_key' values in the range @@ -62,7 +62,7 @@ ha_rows hp_rb_records_in_range(HP_INFO *info, int inx, key_range *min_key, { custom_arg.key_length= hp_rb_pack_key(keyinfo, (uchar*) info->recbuf, (uchar*) min_key->key, - min_key->length); + min_key->keypart_map); start_pos= tree_record_pos(rb_tree, info->recbuf, min_key->flag, &custom_arg); } @@ -70,12 +70,12 @@ ha_rows hp_rb_records_in_range(HP_INFO *info, int inx, key_range *min_key, { start_pos= 0; } - + if (max_key) { custom_arg.key_length= hp_rb_pack_key(keyinfo, (uchar*) info->recbuf, (uchar*) max_key->key, - max_key->length); + max_key->keypart_map); end_pos= tree_record_pos(rb_tree, info->recbuf, max_key->flag, &custom_arg); } @@ -97,7 +97,7 @@ ha_rows hp_rb_records_in_range(HP_INFO *info, int inx, key_range *min_key, /* Sets info->current_ptr to found record */ /* next_flag: Search=0, next=1, prev =2, same =3 */ -byte *hp_search(HP_INFO *info, HP_KEYDEF *keyinfo, const byte *key, +uchar *hp_search(HP_INFO *info, HP_KEYDEF *keyinfo, const uchar *key, uint nextflag) { reg1 HASH_INFO *pos,*prev_ptr; @@ -175,7 +175,7 @@ byte *hp_search(HP_INFO *info, HP_KEYDEF *keyinfo, const byte *key, since last read ! */ -byte *hp_search_next(HP_INFO *info, HP_KEYDEF *keyinfo, const byte *key, +uchar *hp_search_next(HP_INFO *info, HP_KEYDEF *keyinfo, const uchar *key, HASH_INFO *pos) { DBUG_ENTER("hp_search_next"); @@ -238,7 +238,7 @@ void hp_movelink(HASH_INFO *pos, HASH_INFO *next_link, HASH_INFO *newlink) /* Calc hashvalue for a key */ -ulong hp_hashnr(register HP_KEYDEF *keydef, register const byte *key) +ulong hp_hashnr(register HP_KEYDEF *keydef, register const uchar *key) { /*register*/ ulong nr=1, nr2=4; @@ -304,7 +304,7 @@ ulong hp_hashnr(register HP_KEYDEF *keydef, register const byte *key) /* Calc hashvalue for a key in a record */ -ulong hp_rec_hashnr(register HP_KEYDEF *keydef, register const byte *rec) +ulong hp_rec_hashnr(register HP_KEYDEF *keydef, register const uchar *rec) { ulong nr=1, nr2=4; HA_KEYSEG *seg,*endseg; @@ -377,9 +377,15 @@ ulong hp_rec_hashnr(register HP_KEYDEF *keydef, register const byte *rec) * far, and works well on both numbers and strings. 
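 * For reference (a descriptive sketch, not code from this change): the
 * default, non-NEW_HASH_FUNCTION variants above mix each key byte b into
 * a pair of accumulators, roughly
 *
 *   nr= 1; nr2= 4;
 *   for each byte b:
 *     nr^=  (((nr & 63) + nr2) * b) + (nr << 8);
 *     nr2+= 3;
 *
 * and the same nr/nr2 pair is what is now seeded and passed to the
 * collation's hash_sort() for text segments below.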
 */
-ulong hp_hashnr(register HP_KEYDEF *keydef, register const byte *key)
+ulong hp_hashnr(register HP_KEYDEF *keydef, register const uchar *key)
 {
-  register ulong nr=0;
+  /*
+    Note: if a key consists of a combination of numeric and
+    text columns, it most likely won't work well.
+    Making text columns work with NEW_HASH_FUNCTION
+    also needs changes in strings/ctype-xxx.c.
+  */
+  ulong nr= 1, nr2= 4;
   HA_KEYSEG *seg,*endseg;
 
   for (seg=keydef->seg,endseg=seg+keydef->keysegs ; seg < endseg ; seg++)
@@ -401,14 +407,15 @@
     }
     if (seg->type == HA_KEYTYPE_TEXT)
     {
-      seg->charset->hash_sort(seg->charset,pos,((uchar*)key)-pos,&nr,NULL);
+      seg->charset->coll->hash_sort(seg->charset, pos, ((uchar*)key)-pos,
+                                    &nr, &nr2);
     }
     else if (seg->type == HA_KEYTYPE_VARTEXT1)  /* Any VARCHAR segments */
     {
       uint pack_length= 2;                      /* Key packing is constant */
       uint length= uint2korr(pos);
-      seg->charset->hash_sort(seg->charset, pos+pack_length, length, &nr,
-                              NULL);
+      seg->charset->coll->hash_sort(seg->charset, pos+pack_length, length,
+                                    &nr, &nr2);
       key+= pack_length;
     }
     else
@@ -426,9 +433,9 @@
 
 /* Calc hashvalue for a key in a record */
 
-ulong hp_rec_hashnr(register HP_KEYDEF *keydef, register const byte *rec)
+ulong hp_rec_hashnr(register HP_KEYDEF *keydef, register const uchar *rec)
 {
-  register ulong nr=0;
+  ulong nr= 1, nr2= 4;
   HA_KEYSEG *seg,*endseg;
 
   for (seg=keydef->seg,endseg=seg+keydef->keysegs ; seg < endseg ; seg++)
@@ -444,14 +451,16 @@
     }
     if (seg->type == HA_KEYTYPE_TEXT)
     {
-      seg->charset->hash_sort(seg->charset,pos,((uchar*)key)-pos,&nr,NULL);
+      uint char_length= seg->length;    /* TODO: fix to use my_charpos() */
+      seg->charset->coll->hash_sort(seg->charset, pos, char_length,
+                                    &nr, &nr2);
     }
     else if (seg->type == HA_KEYTYPE_VARTEXT1)  /* Any VARCHAR segments */
     {
       uint pack_length= seg->bit_start;
       uint length= (pack_length == 1 ?
(uint) *(uchar*) pos : uint2korr(pos)); - seg->charset->hash_sort(seg->charset, pos+pack_length, - length, &nr, NULL); + seg->charset->coll->hash_sort(seg->charset, pos+pack_length, + length, &nr, &nr2); } else { @@ -490,7 +499,7 @@ ulong hp_rec_hashnr(register HP_KEYDEF *keydef, register const byte *rec) <> 0 Key differes */ -int hp_rec_key_cmp(HP_KEYDEF *keydef, const byte *rec1, const byte *rec2, +int hp_rec_key_cmp(HP_KEYDEF *keydef, const uchar *rec1, const uchar *rec2, my_bool diff_if_only_endspace_difference) { HA_KEYSEG *seg,*endseg; @@ -577,7 +586,7 @@ int hp_rec_key_cmp(HP_KEYDEF *keydef, const byte *rec1, const byte *rec2, /* Compare a key in a record to a whole key */ -int hp_key_cmp(HP_KEYDEF *keydef, const byte *rec, const byte *key) +int hp_key_cmp(HP_KEYDEF *keydef, const uchar *rec, const uchar *key) { HA_KEYSEG *seg,*endseg; @@ -661,7 +670,7 @@ int hp_key_cmp(HP_KEYDEF *keydef, const byte *rec, const byte *key) /* Copy a key from a record to a keybuffer */ -void hp_make_key(HP_KEYDEF *keydef, byte *key, const byte *rec) +void hp_make_key(HP_KEYDEF *keydef, uchar *key, const uchar *rec) { HA_KEYSEG *seg,*endseg; @@ -693,10 +702,10 @@ void hp_make_key(HP_KEYDEF *keydef, byte *key, const byte *rec) } while(0) -uint hp_rb_make_key(HP_KEYDEF *keydef, byte *key, - const byte *rec, byte *recpos) +uint hp_rb_make_key(HP_KEYDEF *keydef, uchar *key, + const uchar *rec, uchar *recpos) { - byte *start_key= key; + uchar *start_key= key; HA_KEYSEG *seg, *endseg; for (seg= keydef->seg, endseg= seg + keydef->keysegs; seg < endseg; seg++) @@ -710,7 +719,7 @@ uint hp_rb_make_key(HP_KEYDEF *keydef, byte *key, if (seg->flag & HA_SWAP_KEY) { uint length= seg->length; - byte *pos= (byte*) rec + seg->start; + uchar *pos= (uchar*) rec + seg->start; #ifdef HAVE_ISNAN if (seg->type == HA_KEYTYPE_FLOAT) @@ -759,7 +768,7 @@ uint hp_rb_make_key(HP_KEYDEF *keydef, byte *key, set_if_smaller(length,tmp_length); FIX_LENGTH(cs, pos, length, char_length); store_key_length_inc(key,char_length); - memcpy((byte*) key,(byte*) pos,(size_t) char_length); + memcpy((uchar*) key,(uchar*) pos,(size_t) char_length); key+= char_length; continue; } @@ -772,42 +781,38 @@ uint hp_rb_make_key(HP_KEYDEF *keydef, byte *key, char_length / seg->charset->mbmaxlen); set_if_smaller(char_length, seg->length); /* QQ: ok to remove? 
*/ if (char_length < seg->length) - seg->charset->cset->fill(seg->charset, (char*) key + char_length, + seg->charset->cset->fill(seg->charset, (char*) key + char_length, seg->length - char_length, ' '); } memcpy(key, rec + seg->start, (size_t) char_length); key+= seg->length; } - memcpy(key, &recpos, sizeof(byte*)); + memcpy(key, &recpos, sizeof(uchar*)); return (uint) (key - start_key); } uint hp_rb_pack_key(HP_KEYDEF *keydef, uchar *key, const uchar *old, - uint k_len) + key_part_map keypart_map) { HA_KEYSEG *seg, *endseg; uchar *start_key= key; - + for (seg= keydef->seg, endseg= seg + keydef->keysegs; - seg < endseg && (int) k_len > 0; old+= seg->length, seg++) + seg < endseg && keypart_map; old+= seg->length, seg++) { uint char_length; + keypart_map>>= 1; if (seg->null_bit) { - k_len--; if (!(*key++= (char) 1 - *old++)) - { - k_len-= seg->length; continue; } - } if (seg->flag & HA_SWAP_KEY) { uint length= seg->length; - byte *pos= (byte*) old + length; + uchar *pos= (uchar*) old + length; - k_len-= length; while (length--) { *key++= *--pos; @@ -822,12 +827,11 @@ uint hp_rb_pack_key(HP_KEYDEF *keydef, uchar *key, const uchar *old, CHARSET_INFO *cs= seg->charset; char_length= length/cs->mbmaxlen; - k_len-= 2+length; old+= 2; set_if_smaller(length,tmp_length); /* Safety */ FIX_LENGTH(cs, old, length, char_length); store_key_length_inc(key,char_length); - memcpy((byte*) key, old,(size_t) char_length); + memcpy((uchar*) key, old,(size_t) char_length); key+= char_length; continue; } @@ -843,22 +847,21 @@ uint hp_rb_pack_key(HP_KEYDEF *keydef, uchar *key, const uchar *old, } memcpy(key, old, (size_t) char_length); key+= seg->length; - k_len-= seg->length; } return (uint) (key - start_key); } uint hp_rb_key_length(HP_KEYDEF *keydef, - const byte *key __attribute__((unused))) + const uchar *key __attribute__((unused))) { return keydef->length; } -uint hp_rb_null_key_length(HP_KEYDEF *keydef, const byte *key) +uint hp_rb_null_key_length(HP_KEYDEF *keydef, const uchar *key) { - const byte *start_key= key; + const uchar *start_key= key; HA_KEYSEG *seg, *endseg; for (seg= keydef->seg, endseg= seg + keydef->keysegs; seg < endseg; seg++) @@ -871,9 +874,9 @@ uint hp_rb_null_key_length(HP_KEYDEF *keydef, const byte *key) } -uint hp_rb_var_key_length(HP_KEYDEF *keydef, const byte *key) +uint hp_rb_var_key_length(HP_KEYDEF *keydef, const uchar *key) { - const byte *start_key= key; + const uchar *start_key= key; HA_KEYSEG *seg, *endseg; for (seg= keydef->seg, endseg= seg + keydef->keysegs; seg < endseg; seg++) @@ -898,7 +901,7 @@ uint hp_rb_var_key_length(HP_KEYDEF *keydef, const byte *key) 0 otherwise */ -my_bool hp_if_null_in_key(HP_KEYDEF *keydef, const byte *record) +my_bool hp_if_null_in_key(HP_KEYDEF *keydef, const uchar *record) { HA_KEYSEG *seg,*endseg; for (seg=keydef->seg,endseg=seg+keydef->keysegs ; seg < endseg ; seg++) @@ -924,7 +927,7 @@ my_bool hp_if_null_in_key(HP_KEYDEF *keydef, const byte *record) less than zero. */ -void heap_update_auto_increment(HP_INFO *info, const byte *record) +void heap_update_auto_increment(HP_INFO *info, const uchar *record) { ulonglong value= 0; /* Store unsigned values here */ longlong s_value= 0; /* Store signed values here */ diff --git a/storage/heap/hp_info.c b/storage/heap/hp_info.c index 2c58604eed1..ea78c53fd40 100644 --- a/storage/heap/hp_info.c +++ b/storage/heap/hp_info.c @@ -18,7 +18,7 @@ #include "heapdef.h" -byte *heap_position(HP_INFO *info) +uchar *heap_position(HP_INFO *info) { return ((info->update & HA_STATE_AKTIV) ? 
info->current_ptr : (HEAP_PTR) 0);
diff --git a/storage/heap/hp_open.c b/storage/heap/hp_open.c
index 02a8d4f95ca..4d5ec6e27ac 100644
--- a/storage/heap/hp_open.c
+++ b/storage/heap/hp_open.c
@@ -22,43 +22,34 @@
 #include "my_sys.h"
 
-HP_INFO *heap_open(const char *name, int mode)
+/*
+  Open heap table based on HP_SHARE structure
+
+  NOTE
+    This doesn't register the table in the open table list.
+*/
+
+HP_INFO *heap_open_from_share(HP_SHARE *share, int mode)
 {
   HP_INFO *info;
-  HP_SHARE *share;
+  DBUG_ENTER("heap_open_from_share");
 
-  DBUG_ENTER("heap_open");
-  pthread_mutex_lock(&THR_LOCK_heap);
-  if (!(share= hp_find_named_heap(name)))
-  {
-    my_errno= ENOENT;
-    pthread_mutex_unlock(&THR_LOCK_heap);
-    DBUG_RETURN(0);
-  }
   if (!(info= (HP_INFO*) my_malloc((uint) sizeof(HP_INFO) +
                                    2 * share->max_key_length,
                                    MYF(MY_ZEROFILL))))
   {
-    pthread_mutex_unlock(&THR_LOCK_heap);
     DBUG_RETURN(0);
   }
   share->open_count++;
 #ifdef THREAD
   thr_lock_data_init(&share->lock,&info->lock,NULL);
 #endif
-  info->open_list.data= (void*) info;
-  heap_open_list= list_add(heap_open_list,&info->open_list);
-  pthread_mutex_unlock(&THR_LOCK_heap);
-
   info->s= share;
-  info->lastkey= (byte*) (info + 1);
-  info->recbuf= (byte*) (info->lastkey + share->max_key_length);
+  info->lastkey= (uchar*) (info + 1);
+  info->recbuf= (uchar*) (info->lastkey + share->max_key_length);
   info->mode= mode;
   info->current_record= (ulong) ~0L;            /* No current record */
-  info->current_ptr= 0;
-  info->current_hash_ptr= 0;
   info->lastinx= info->errkey= -1;
-  info->update= 0;
 #ifndef DBUG_OFF
   info->opt_flag= READ_CHECK_USED;              /* Check when changing */
 #endif
@@ -68,7 +59,59 @@ HP_INFO *heap_open(const char *name, int mode)
   DBUG_RETURN(info);
 }
 
- /* map name to a heap-nr. If name isn't found return 0 */
+
+/*
+  Open heap table based on HP_SHARE structure and register it
+*/
+
+HP_INFO *heap_open_from_share_and_register(HP_SHARE *share, int mode)
+{
+  HP_INFO *info;
+  DBUG_ENTER("heap_open_from_share_and_register");
+
+  pthread_mutex_lock(&THR_LOCK_heap);
+  if ((info= heap_open_from_share(share, mode)))
+  {
+    info->open_list.data= (void*) info;
+    heap_open_list= list_add(heap_open_list,&info->open_list);
+  }
+  pthread_mutex_unlock(&THR_LOCK_heap);
+  DBUG_RETURN(info);
+}
+
+
+/*
+  Open heap table based on name
+
+  NOTE
+    This registers the table in the open table list so that it can be
+    found by future heap_open() calls.
+*/
+
+HP_INFO *heap_open(const char *name, int mode)
+{
+  HP_INFO *info;
+  HP_SHARE *share;
+  DBUG_ENTER("heap_open");
+
+  pthread_mutex_lock(&THR_LOCK_heap);
+  if (!(share= hp_find_named_heap(name)))
+  {
+    my_errno= ENOENT;
+    pthread_mutex_unlock(&THR_LOCK_heap);
+    DBUG_RETURN(0);
+  }
+  if ((info= heap_open_from_share(share, mode)))
+  {
+    info->open_list.data= (void*) info;
+    heap_open_list= list_add(heap_open_list,&info->open_list);
+  }
+  pthread_mutex_unlock(&THR_LOCK_heap);
+  DBUG_RETURN(info);
+}
+
+
+/* map name to a heap-nr.
If name isn't found return 0 */ HP_SHARE *hp_find_named_heap(const char *name) { diff --git a/storage/heap/hp_rfirst.c b/storage/heap/hp_rfirst.c index d1842949421..48c1e625bd8 100644 --- a/storage/heap/hp_rfirst.c +++ b/storage/heap/hp_rfirst.c @@ -17,7 +17,7 @@ /* Read first record with the current key */ -int heap_rfirst(HP_INFO *info, byte *record, int inx) +int heap_rfirst(HP_INFO *info, uchar *record, int inx) { HP_SHARE *share = info->s; HP_KEYDEF *keyinfo = share->keydef + inx; @@ -26,13 +26,13 @@ int heap_rfirst(HP_INFO *info, byte *record, int inx) info->lastinx= inx; if (keyinfo->algorithm == HA_KEY_ALG_BTREE) { - byte *pos; + uchar *pos; if ((pos = tree_search_edge(&keyinfo->rb_tree, info->parents, &info->last_pos, offsetof(TREE_ELEMENT, left)))) { memcpy(&pos, pos + (*keyinfo->get_key_length)(keyinfo, pos), - sizeof(byte*)); + sizeof(uchar*)); info->current_ptr = pos; memcpy(record, pos, (size_t)share->reclength); info->update = HA_STATE_AKTIV; diff --git a/storage/heap/hp_rkey.c b/storage/heap/hp_rkey.c index a095336d295..6eeac6acd7b 100644 --- a/storage/heap/hp_rkey.c +++ b/storage/heap/hp_rkey.c @@ -15,10 +15,10 @@ #include "heapdef.h" -int heap_rkey(HP_INFO *info, byte *record, int inx, const byte *key, - uint key_len, enum ha_rkey_function find_flag) +int heap_rkey(HP_INFO *info, uchar *record, int inx, const uchar *key, + key_part_map keypart_map, enum ha_rkey_function find_flag) { - byte *pos; + uchar *pos; HP_SHARE *share= info->s; HP_KEYDEF *keyinfo= share->keydef + inx; DBUG_ENTER("heap_rkey"); @@ -38,7 +38,7 @@ int heap_rkey(HP_INFO *info, byte *record, int inx, const byte *key, custom_arg.keyseg= info->s->keydef[inx].seg; custom_arg.key_length= info->lastkey_len= hp_rb_pack_key(keyinfo, (uchar*) info->lastkey, - (uchar*) key, key_len); + (uchar*) key, keypart_map); custom_arg.search_flag= SEARCH_FIND | SEARCH_SAME; /* for next rkey() after deletion */ if (find_flag == HA_READ_AFTER_KEY) @@ -53,7 +53,7 @@ int heap_rkey(HP_INFO *info, byte *record, int inx, const byte *key, info->update= 0; DBUG_RETURN(my_errno= HA_ERR_KEY_NOT_FOUND); } - memcpy(&pos, pos + (*keyinfo->get_key_length)(keyinfo, pos), sizeof(byte*)); + memcpy(&pos, pos + (*keyinfo->get_key_length)(keyinfo, pos), sizeof(uchar*)); info->current_ptr= pos; } else @@ -74,7 +74,7 @@ int heap_rkey(HP_INFO *info, byte *record, int inx, const byte *key, /* Quick find of record */ -gptr heap_find(HP_INFO *info, int inx, const byte *key) +uchar* heap_find(HP_INFO *info, int inx, const uchar *key) { return hp_search(info, info->s->keydef + inx, key, 0); } diff --git a/storage/heap/hp_rlast.c b/storage/heap/hp_rlast.c index b72e815147f..45ad7c21f49 100644 --- a/storage/heap/hp_rlast.c +++ b/storage/heap/hp_rlast.c @@ -18,7 +18,7 @@ /* Read first record with the current key */ -int heap_rlast(HP_INFO *info, byte *record, int inx) +int heap_rlast(HP_INFO *info, uchar *record, int inx) { HP_SHARE *share= info->s; HP_KEYDEF *keyinfo= share->keydef + inx; @@ -27,13 +27,13 @@ int heap_rlast(HP_INFO *info, byte *record, int inx) info->lastinx= inx; if (keyinfo->algorithm == HA_KEY_ALG_BTREE) { - byte *pos; + uchar *pos; if ((pos = tree_search_edge(&keyinfo->rb_tree, info->parents, &info->last_pos, offsetof(TREE_ELEMENT, right)))) { memcpy(&pos, pos + (*keyinfo->get_key_length)(keyinfo, pos), - sizeof(byte*)); + sizeof(uchar*)); info->current_ptr = pos; memcpy(record, pos, (size_t)share->reclength); info->update = HA_STATE_AKTIV; diff --git a/storage/heap/hp_rnext.c b/storage/heap/hp_rnext.c index 3b436fe87aa..262754e9e64 
100644 --- a/storage/heap/hp_rnext.c +++ b/storage/heap/hp_rnext.c @@ -17,9 +17,9 @@ /* Read next record with the same key */ -int heap_rnext(HP_INFO *info, byte *record) +int heap_rnext(HP_INFO *info, uchar *record) { - byte *pos; + uchar *pos; HP_SHARE *share=info->s; HP_KEYDEF *keyinfo; DBUG_ENTER("heap_rnext"); @@ -47,7 +47,7 @@ int heap_rnext(HP_INFO *info, byte *record) if (pos) { memcpy(&pos, pos + (*keyinfo->get_key_length)(keyinfo, pos), - sizeof(byte*)); + sizeof(uchar*)); info->current_ptr = pos; } else diff --git a/storage/heap/hp_rprev.c b/storage/heap/hp_rprev.c index bfdd2f9d47a..63bfffffba9 100644 --- a/storage/heap/hp_rprev.c +++ b/storage/heap/hp_rprev.c @@ -18,9 +18,9 @@ /* Read prev record for key */ -int heap_rprev(HP_INFO *info, byte *record) +int heap_rprev(HP_INFO *info, uchar *record) { - byte *pos; + uchar *pos; HP_SHARE *share=info->s; HP_KEYDEF *keyinfo; DBUG_ENTER("heap_rprev"); @@ -47,7 +47,7 @@ int heap_rprev(HP_INFO *info, byte *record) if (pos) { memcpy(&pos, pos + (*keyinfo->get_key_length)(keyinfo, pos), - sizeof(byte*)); + sizeof(uchar*)); info->current_ptr = pos; } else diff --git a/storage/heap/hp_rrnd.c b/storage/heap/hp_rrnd.c index ad0190cc00c..3ac23d293f2 100644 --- a/storage/heap/hp_rrnd.c +++ b/storage/heap/hp_rrnd.c @@ -24,7 +24,7 @@ HA_ERR_END_OF_FILE = EOF. */ -int heap_rrnd(register HP_INFO *info, byte *record, byte *pos) +int heap_rrnd(register HP_INFO *info, uchar *record, uchar *pos) { HP_SHARE *share=info->s; DBUG_ENTER("heap_rrnd"); @@ -59,7 +59,7 @@ int heap_rrnd(register HP_INFO *info, byte *record, byte *pos) HA_ERR_END_OF_FILE = EOF. */ -int heap_rrnd_old(register HP_INFO *info, byte *record, ulong pos) +int heap_rrnd_old(register HP_INFO *info, uchar *record, ulong pos) { HP_SHARE *share=info->s; DBUG_ENTER("heap_rrnd"); diff --git a/storage/heap/hp_rsame.c b/storage/heap/hp_rsame.c index 10513f91726..1a3724672b6 100644 --- a/storage/heap/hp_rsame.c +++ b/storage/heap/hp_rsame.c @@ -25,7 +25,7 @@ HA_ERR_KEY_NOT_FOUND = Record not found with key */ -int heap_rsame(register HP_INFO *info, byte *record, int inx) +int heap_rsame(register HP_INFO *info, uchar *record, int inx) { HP_SHARE *share=info->s; DBUG_ENTER("heap_rsame"); diff --git a/storage/heap/hp_scan.c b/storage/heap/hp_scan.c index 4249ac4148a..e8913e92c86 100644 --- a/storage/heap/hp_scan.c +++ b/storage/heap/hp_scan.c @@ -34,7 +34,7 @@ int heap_scan_init(register HP_INFO *info) DBUG_RETURN(0); } -int heap_scan(register HP_INFO *info, byte *record) +int heap_scan(register HP_INFO *info, uchar *record) { HP_SHARE *share=info->s; ulong pos; diff --git a/storage/heap/hp_test1.c b/storage/heap/hp_test1.c index 31c9b8f2f30..b1b55098a78 100644 --- a/storage/heap/hp_test1.c +++ b/storage/heap/hp_test1.c @@ -32,11 +32,12 @@ int main(int argc, char **argv) { int i,j,error,deleted; HP_INFO *file; - char record[128],key[32]; + uchar record[128],key[32]; const char *filename; HP_KEYDEF keyinfo[10]; HA_KEYSEG keyseg[4]; HP_CREATE_INFO hp_create_info; + HP_SHARE *tmp_share; MY_INIT(argv[0]); filename= "test1"; @@ -52,23 +53,24 @@ int main(int argc, char **argv) keyinfo[0].seg[0].start=1; keyinfo[0].seg[0].length=6; keyinfo[0].seg[0].charset= &my_charset_latin1; + keyinfo[0].seg[0].null_bit= 0; keyinfo[0].flag = HA_NOSAME; deleted=0; - bzero((gptr) flags,sizeof(flags)); + bzero((uchar*) flags,sizeof(flags)); printf("- Creating heap-file\n"); if (heap_create(filename,1,keyinfo,30,(ulong) flag*100000L,10L, - &hp_create_info) || + &hp_create_info, &tmp_share) || !(file= heap_open(filename, 
2))) goto err; printf("- Writing records:s\n"); - strmov(record," ..... key "); + strmov((char*) record," ..... key "); for (i=49 ; i>=1 ; i-=2 ) { j=i%25 +1; - sprintf(key,"%6d",j); + sprintf((char*) key,"%6d",j); bmove(record+1,key,6); error=heap_write(file,record); if (heap_check_heap(file,0)) @@ -90,18 +92,18 @@ int main(int argc, char **argv) for (i=1 ; i<=10 ; i++) { if (i == remove_ant) { VOID(heap_close(file)) ; return (0) ; } - sprintf(key,"%6d",(j=(int) ((rand() & 32767)/32767.*25))); + sprintf((char*) key,"%6d",(j=(int) ((rand() & 32767)/32767.*25))); if ((error = heap_rkey(file,record,0,key,6,HA_READ_KEY_EXACT))) { if (verbose || (flags[j] == 1 || (error && my_errno != HA_ERR_KEY_NOT_FOUND))) - printf("key: %s rkey: %3d my_errno: %3d\n",key,error,my_errno); + printf("key: %s rkey: %3d my_errno: %3d\n",(char*) key,error,my_errno); } else { error=heap_delete(file,record); if (error || verbose) - printf("key: %s delete: %d my_errno: %d\n",key,error,my_errno); + printf("key: %s delete: %d my_errno: %d\n",(char*) key,error,my_errno); flags[j]=0; if (! error) deleted++; @@ -116,7 +118,7 @@ int main(int argc, char **argv) printf("- Reading records with key\n"); for (i=1 ; i<=25 ; i++) { - sprintf(key,"%6d",i); + sprintf((char*) key,"%6d",i); bmove(record+1,key,6); my_errno=0; error=heap_rkey(file,record,0,key,6,HA_READ_KEY_EXACT); @@ -125,7 +127,7 @@ int main(int argc, char **argv) (error && (flags[i] != 0 || my_errno != HA_ERR_KEY_NOT_FOUND))) { printf("key: %s rkey: %3d my_errno: %3d record: %s\n", - key,error,my_errno,record+1); + (char*) key,error,my_errno,record+1); } } @@ -148,7 +150,7 @@ int main(int argc, char **argv) if (verbose || (error != 0 && error != HA_ERR_RECORD_DELETED)) { printf("pos: %2d ni_rrnd: %3d my_errno: %3d record: %s\n", - i-1,error,my_errno,record+1); + i-1,error,my_errno,(char*) record+1); } } } diff --git a/storage/heap/hp_test2.c b/storage/heap/hp_test2.c index dcca5fb44b9..e57a554e5d9 100644 --- a/storage/heap/hp_test2.c +++ b/storage/heap/hp_test2.c @@ -42,8 +42,8 @@ static my_bool key3[MAX_RECORDS]; static int reclength=39; -static int calc_check(byte *buf,uint length); -static void make_record(char *record, uint n1, uint n2, uint n3, +static int calc_check(uchar *buf,uint length); +static void make_record(uchar *record, uint n1, uint n2, uint n3, const char *mark, uint count); /* Main program */ @@ -56,9 +56,10 @@ int main(int argc, char *argv[]) int error; ulong pos; unsigned long key_check; - char record[128],record2[128],record3[128],key[10]; + uchar record[128],record2[128],record3[128],key[10]; const char *filename,*filename2; HP_INFO *file,*file2; + HP_SHARE *tmp_share; HP_KEYDEF keyinfo[MAX_KEYS]; HA_KEYSEG keyseg[MAX_KEYS*5]; HEAP_PTR position; @@ -126,13 +127,13 @@ int main(int argc, char *argv[]) printf("- Creating heap-file\n"); if (heap_create(filename,keys,keyinfo,reclength,(ulong) flag*100000L, - (ulong) recant/2, &hp_create_info) || + (ulong) recant/2, &hp_create_info, &tmp_share) || !(file= heap_open(filename, 2))) goto err; signal(SIGINT,endprog); printf("- Writing records:s\n"); - strmov(record," ..... key"); + strmov((char*) record," ..... 
key"); for (i=0 ; i < recant ; i++) { @@ -178,10 +179,10 @@ int main(int argc, char *argv[]) for (j=rnd(1000)+1 ; j>0 && key1[j] == 0 ; j--) ; if (j != 0) { - sprintf(key,"%6d",j); + sprintf((char*) key,"%6d",j); if (heap_rkey(file,record,0,key,6, HA_READ_KEY_EXACT)) { - printf("can't find key1: \"%s\"\n",key); + printf("can't find key1: \"%s\"\n",(char*) key); goto err; } #ifdef NOT_USED @@ -191,13 +192,13 @@ int main(int argc, char *argv[]) #endif if (heap_delete(file,record)) { - printf("error: %d; can't delete record: \"%s\"\n", my_errno,record); + printf("error: %d; can't delete record: \"%s\"\n", my_errno,(char*) record); goto err; } opt_delete++; - key1[atoi(record+keyinfo[0].seg[0].start)]--; - key3[atoi(record+keyinfo[2].seg[0].start)]=0; - key_check-=atoi(record); + key1[atoi((char*) record+keyinfo[0].seg[0].start)]--; + key3[atoi((char*) record+keyinfo[2].seg[0].start)]=0; + key_check-=atoi((char*) record); if (testflag == 2 && heap_check_heap(file,0)) { puts("Heap keys crashed"); @@ -238,10 +239,10 @@ int main(int argc, char *argv[]) for (j=rnd(1000)+1 ; j>0 && key1[j] == 0 ; j--) ; if (!key1[j]) continue; - sprintf(key,"%6d",j); + sprintf((char*) key,"%6d",j); if (heap_rkey(file,record,0,key,6, HA_READ_KEY_EXACT)) { - printf("can't find key1: \"%s\"\n",key); + printf("can't find key1: \"%s\"\n",(char*) key); goto err; } } @@ -250,19 +251,20 @@ int main(int argc, char *argv[]) if (my_errno != HA_ERR_FOUND_DUPP_KEY || key3[n3] == 0) { printf("error: %d; can't update:\nFrom: \"%s\"\nTo: \"%s\"\n", - my_errno,record,record2); + my_errno,(char*) record, (char*) record2); goto err; } if (verbose) - printf("Double key when tried to update:\nFrom: \"%s\"\nTo: \"%s\"\n",record,record2); + printf("Double key when tried to update:\nFrom: \"%s\"\nTo: \"%s\"\n", + (char*) record, (char*) record2); } else { - key1[atoi(record+keyinfo[0].seg[0].start)]--; - key3[atoi(record+keyinfo[2].seg[0].start)]=0; + key1[atoi((char*) record+keyinfo[0].seg[0].start)]--; + key3[atoi((char*) record+keyinfo[2].seg[0].start)]=0; key1[n1]++; key3[n3]=1; update++; - key_check=key_check-atoi(record)+n1; + key_check=key_check-atoi((char*) record)+n1; } if (testflag == 3 && heap_check_heap(file,0)) { @@ -280,7 +282,7 @@ int main(int argc, char *argv[]) for (i=999, dupp_keys=found_key=0 ; i>0 ; i--) { if (key1[i] > dupp_keys) { dupp_keys=key1[i]; found_key=i; } - sprintf(key,"%6d",found_key); + sprintf((char*) key,"%6d",found_key); } if (dupp_keys > 3) @@ -293,9 +295,9 @@ int main(int argc, char *argv[]) goto err; if (heap_rnext(file,record3)) goto err; if (heap_delete(file,record3)) goto err; - key_check-=atoi(record3); - key1[atoi(record+keyinfo[0].seg[0].start)]--; - key3[atoi(record+keyinfo[2].seg[0].start)]=0; + key_check-=atoi((char*) record3); + key1[atoi((char*) record+keyinfo[0].seg[0].start)]--; + key3[atoi((char*) record+keyinfo[2].seg[0].start)]=0; opt_delete++; ant=2; while ((error=heap_rnext(file,record3)) == 0 || @@ -320,16 +322,16 @@ int main(int argc, char *argv[]) if (heap_rlast(file,record3,0)) goto err; if (heap_delete(file,record3)) goto err; - key_check-=atoi(record3); - key1[atoi(record+keyinfo[0].seg[0].start)]--; - key3[atoi(record+keyinfo[2].seg[0].start)]=0; + key_check-=atoi((char*) record3); + key1[atoi((char*) record+keyinfo[0].seg[0].start)]--; + key3[atoi((char*) record+keyinfo[2].seg[0].start)]=0; opt_delete++; if (heap_rprev(file,record3) || heap_rprev(file,record3)) goto err; if (heap_delete(file,record3)) goto err; - key_check-=atoi(record3); - 
key1[atoi(record+keyinfo[0].seg[0].start)]--; - key3[atoi(record+keyinfo[2].seg[0].start)]=0; + key_check-=atoi((char*) record3); + key1[atoi((char*) record+keyinfo[0].seg[0].start)]--; + key3[atoi((char*) record+keyinfo[2].seg[0].start)]=0; opt_delete++; ant=3; while ((error=heap_rprev(file,record3)) == 0 || @@ -364,10 +366,10 @@ int main(int argc, char *argv[]) if (error) goto err; if (heap_delete(file,record3)) goto err; - key_check-=atoi(record3); + key_check-=atoi((char*) record3); opt_delete++; - key1[atoi(record+keyinfo[0].seg[0].start)]--; - key3[atoi(record+keyinfo[2].seg[0].start)]=0; + key1[atoi((char*) record+keyinfo[0].seg[0].start)]--; + key3[atoi((char*) record+keyinfo[2].seg[0].start)]=0; ant=0; while ((error=heap_scan(file,record3)) == 0 || error == HA_ERR_RECORD_DELETED) @@ -509,7 +511,7 @@ int main(int argc, char *argv[]) for (i=999, dupp_keys=found_key=0 ; i>0 ; i--) { if (key1[i] > dupp_keys) { dupp_keys=key1[i]; found_key=i; } - sprintf(key,"%6d",found_key); + sprintf((char*) key,"%6d",found_key); } printf("- Read through all keys with first-next-last-prev\n"); ant=0; @@ -562,8 +564,9 @@ int main(int argc, char *argv[]) heap_close(file2); printf("- Creating output heap-file 2\n"); - if (heap_create(filename2,1,keyinfo,reclength,0L,0L,&hp_create_info) || - !(file2= heap_open(filename2, 2))) + if (heap_create(filename2, 1, keyinfo, reclength, 0L, 0L, &hp_create_info, + &tmp_share) || + !(file2= heap_open_from_share_and_register(tmp_share, 2))) goto err; printf("- Copying and removing records\n"); @@ -575,7 +578,7 @@ int main(int argc, char *argv[]) { if (heap_write(file2,record)) goto err; - key_check-=atoi(record); + key_check-=atoi((char*) record); write_count++; if (heap_delete(file,record)) goto err; @@ -674,7 +677,7 @@ static sig_handler endprog(int sig_number __attribute__((unused))) } } -static int calc_check(byte *buf, uint length) +static int calc_check(uchar *buf, uint length) { int check=0; while (length--) @@ -682,11 +685,11 @@ static int calc_check(byte *buf, uint length) return check; } -static void make_record(char *record, uint n1, uint n2, uint n3, +static void make_record(uchar *record, uint n1, uint n2, uint n3, const char *mark, uint count) { bfill(record,reclength,' '); - sprintf(record,"%6d:%4d:%8d:%3.3s: %4d", + sprintf((char*) record,"%6d:%4d:%8d:%3.3s: %4d", n1,n2,n3,mark,count); record[37]='A'; /* Store A in null key */ record[38]=1; /* set as null */ diff --git a/storage/heap/hp_update.c b/storage/heap/hp_update.c index e7314e3d38c..11dca974ad4 100644 --- a/storage/heap/hp_update.c +++ b/storage/heap/hp_update.c @@ -17,10 +17,10 @@ #include "heapdef.h" -int heap_update(HP_INFO *info, const byte *old, const byte *heap_new) +int heap_update(HP_INFO *info, const uchar *old, const uchar *heap_new) { HP_KEYDEF *keydef, *end, *p_lastinx; - byte *pos; + uchar *pos; bool auto_key_changed= 0; HP_SHARE *share= info->s; DBUG_ENTER("heap_update"); diff --git a/storage/heap/hp_write.c b/storage/heap/hp_write.c index 86e79c9d7ec..2abef2d9b43 100644 --- a/storage/heap/hp_write.c +++ b/storage/heap/hp_write.c @@ -25,14 +25,14 @@ #define HIGHFIND 4 #define HIGHUSED 8 -static byte *next_free_record_pos(HP_SHARE *info); +static uchar *next_free_record_pos(HP_SHARE *info); static HASH_INFO *hp_find_free_hash(HP_SHARE *info, HP_BLOCK *block, ulong records); -int heap_write(HP_INFO *info, const byte *record) +int heap_write(HP_INFO *info, const uchar *record) { HP_KEYDEF *keydef, *end; - byte *pos; + uchar *pos; HP_SHARE *share=info->s; DBUG_ENTER("heap_write"); 
#ifndef DBUG_OFF @@ -88,7 +88,7 @@ err: } share->deleted++; - *((byte**) pos)=share->del_link; + *((uchar**) pos)=share->del_link; share->del_link=pos; pos[share->reclength]=0; /* Record deleted */ @@ -99,13 +99,12 @@ err: Write a key to rb_tree-index */ -int hp_rb_write_key(HP_INFO *info, HP_KEYDEF *keyinfo, const byte *record, - byte *recpos) +int hp_rb_write_key(HP_INFO *info, HP_KEYDEF *keyinfo, const uchar *record, + uchar *recpos) { heap_rb_param custom_arg; uint old_allocated; - info->last_pos= NULL; /* For heap_rnext/heap_rprev */ custom_arg.keyseg= keyinfo->seg; custom_arg.key_length= hp_rb_make_key(keyinfo, info->recbuf, record, recpos); if (keyinfo->flag & HA_NOSAME) @@ -131,17 +130,17 @@ int hp_rb_write_key(HP_INFO *info, HP_KEYDEF *keyinfo, const byte *record, /* Find where to place new record */ -static byte *next_free_record_pos(HP_SHARE *info) +static uchar *next_free_record_pos(HP_SHARE *info) { int block_pos; - byte *pos; - ulong length; + uchar *pos; + size_t length; DBUG_ENTER("next_free_record_pos"); if (info->del_link) { pos=info->del_link; - info->del_link= *((byte**) pos); + info->del_link= *((uchar**) pos); info->deleted--; DBUG_PRINT("exit",("Used old position: 0x%lx",(long) pos)); DBUG_RETURN(pos); @@ -159,9 +158,9 @@ static byte *next_free_record_pos(HP_SHARE *info) info->data_length+=length; } DBUG_PRINT("exit",("Used new position: 0x%lx", - (long) ((byte*) info->block.level_info[0].last_blocks+ + (long) ((uchar*) info->block.level_info[0].last_blocks+ block_pos * info->block.recbuffer))); - DBUG_RETURN((byte*) info->block.level_info[0].last_blocks+ + DBUG_RETURN((uchar*) info->block.level_info[0].last_blocks+ block_pos*info->block.recbuffer); } @@ -192,12 +191,12 @@ static byte *next_free_record_pos(HP_SHARE *info) */ int hp_write_key(HP_INFO *info, HP_KEYDEF *keyinfo, - const byte *record, byte *recpos) + const uchar *record, uchar *recpos) { HP_SHARE *share = info->s; int flag; ulong halfbuff,hashnr,first_index; - byte *ptr_to_rec,*ptr_to_rec2; + uchar *ptr_to_rec,*ptr_to_rec2; HASH_INFO *empty,*gpos,*gpos2,*pos; DBUG_ENTER("hp_write_key"); @@ -391,7 +390,7 @@ static HASH_INFO *hp_find_free_hash(HP_SHARE *info, HP_BLOCK *block, ulong records) { uint block_pos; - ulong length; + size_t length; if (records < block->last_allocated) return hp_find_hash(block,records); @@ -402,6 +401,6 @@ static HASH_INFO *hp_find_free_hash(HP_SHARE *info, info->index_length+=length; } block->last_allocated=records+1; - return((HASH_INFO*) ((byte*) block->level_info[0].last_blocks+ + return((HASH_INFO*) ((uchar*) block->level_info[0].last_blocks+ block_pos*block->recbuffer)); } diff --git a/storage/innobase/CMakeLists.txt b/storage/innobase/CMakeLists.txt index beeee5310c2..47b1a566cd8 100644..100755 --- a/storage/innobase/CMakeLists.txt +++ b/storage/innobase/CMakeLists.txt @@ -13,17 +13,18 @@ # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -#SET(CMAKE_CXX_FLAGS_DEBUG "-DSAFEMALLOC -DSAFE_MUTEX") -#SET(CMAKE_C_FLAGS_DEBUG "-DSAFEMALLOC -DSAFE_MUTEX") -ADD_DEFINITIONS(-DMYSQL_SERVER -D_WIN32 -DWIN32 -D_LIB) +SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX") +SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX") +ADD_DEFINITIONS(-DMYSQL_SERVER -D_WIN32 -D_LIB) INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include ${CMAKE_SOURCE_DIR}/zlib - include - handler + ${CMAKE_SOURCE_DIR}/storage/innobase/include + 
${CMAKE_SOURCE_DIR}/storage/innobase/handler ${CMAKE_SOURCE_DIR}/sql ${CMAKE_SOURCE_DIR}/regex ${CMAKE_SOURCE_DIR}/extra/yassl/include) -ADD_LIBRARY(innobase btr/btr0btr.c btr/btr0cur.c btr/btr0pcur.c btr/btr0sea.c + +SET(INNOBASE_SOURCES btr/btr0btr.c btr/btr0cur.c btr/btr0pcur.c btr/btr0sea.c buf/buf0buf.c buf/buf0flu.c buf/buf0lru.c buf/buf0rea.c data/data0data.c data/data0type.c dict/dict0boot.c dict/dict0crea.c dict/dict0dict.c dict/dict0load.c dict/dict0mem.c @@ -54,3 +55,7 @@ ADD_LIBRARY(innobase btr/btr0btr.c btr/btr0cur.c btr/btr0pcur.c btr/btr0sea.c trx/trx0purge.c trx/trx0rec.c trx/trx0roll.c trx/trx0rseg.c trx/trx0sys.c trx/trx0trx.c trx/trx0undo.c usr/usr0sess.c ut/ut0byte.c ut/ut0dbg.c ut/ut0mem.c ut/ut0rnd.c ut/ut0ut.c ut/ut0vec.c ut/ut0list.c ut/ut0wqueue.c) + +IF(NOT SOURCE_SUBLIBS) + ADD_LIBRARY(innobase ${INNOBASE_SOURCES}) +ENDIF(NOT SOURCE_SUBLIBS) diff --git a/storage/innobase/Makefile.am b/storage/innobase/Makefile.am index f433604f9d4..30e056d68fb 100644 --- a/storage/innobase/Makefile.am +++ b/storage/innobase/Makefile.am @@ -25,100 +25,149 @@ INCLUDES = -I$(top_srcdir)/include -I$(top_builddir)/include \ -I$(top_srcdir)/sql \ -I$(srcdir) -AUTOMAKE_OPTIONS = foreign -TAR = gtar +DEFS = @DEFS@ -noinst_HEADERS = -SUBDIRS = os ut btr buf data dict dyn eval fil fsp fut \ - ha ibuf lock log mach mem mtr page \ - handler \ - pars que read rem row srv sync thr trx usr +noinst_HEADERS = include/btr0btr.h include/btr0btr.ic \ + include/btr0cur.h include/btr0cur.ic \ + include/btr0pcur.h include/btr0pcur.ic \ + include/btr0sea.h include/btr0sea.ic \ + include/btr0types.h include/buf0buf.h \ + include/buf0buf.ic include/buf0flu.h \ + include/buf0flu.ic include/buf0lru.h \ + include/buf0lru.ic include/buf0rea.h \ + include/buf0types.h include/data0data.h \ + include/data0data.ic include/data0type.h \ + include/data0type.ic include/data0types.h \ + include/db0err.h include/dict0boot.h \ + include/dict0boot.ic include/dict0crea.h \ + include/dict0crea.ic include/dict0dict.h \ + include/dict0dict.ic include/dict0load.h \ + include/dict0load.ic include/dict0mem.h \ + include/dict0mem.ic include/dict0types.h \ + include/dyn0dyn.h include/dyn0dyn.ic \ + include/eval0eval.h include/eval0eval.ic \ + include/eval0proc.h include/eval0proc.ic \ + include/fil0fil.h include/fsp0fsp.h \ + include/fsp0fsp.ic include/fut0fut.h \ + include/fut0fut.ic include/fut0lst.h \ + include/fut0lst.ic include/ha0ha.h \ + include/ha0ha.ic include/hash0hash.h \ + include/hash0hash.ic include/ibuf0ibuf.h \ + include/ibuf0ibuf.ic include/ibuf0types.h \ + include/lock0iter.h \ + include/lock0lock.h include/lock0lock.ic \ + include/lock0priv.h include/lock0priv.ic \ + include/lock0types.h include/log0log.h \ + include/log0log.ic include/log0recv.h \ + include/log0recv.ic include/mach0data.h \ + include/mach0data.ic include/mem0dbg.h \ + include/mem0dbg.ic mem/mem0dbg.c \ + include/mem0mem.h include/mem0mem.ic \ + include/mem0pool.h include/mem0pool.ic \ + include/mtr0log.h include/mtr0log.ic \ + include/mtr0mtr.h include/mtr0mtr.ic \ + include/mtr0types.h include/os0file.h \ + include/os0proc.h include/os0proc.ic \ + include/os0sync.h include/os0sync.ic \ + include/os0thread.h include/os0thread.ic \ + include/page0cur.h include/page0cur.ic \ + include/page0page.h include/page0page.ic \ + include/page0types.h include/pars0grm.h \ + include/pars0opt.h include/pars0opt.ic \ + include/pars0pars.h include/pars0pars.ic \ + include/pars0sym.h include/pars0sym.ic \ + include/pars0types.h include/que0que.h \ + 
include/que0que.ic include/que0types.h \ + include/read0read.h include/read0read.ic \ + include/read0types.h include/rem0cmp.h \ + include/rem0cmp.ic include/rem0rec.h \ + include/rem0rec.ic include/rem0types.h \ + include/row0ins.h include/row0ins.ic \ + include/row0mysql.h include/row0mysql.ic \ + include/row0purge.h include/row0purge.ic \ + include/row0row.h include/row0row.ic \ + include/row0sel.h include/row0sel.ic \ + include/row0types.h include/row0uins.h \ + include/row0uins.ic include/row0umod.h \ + include/row0umod.ic include/row0undo.h \ + include/row0undo.ic include/row0upd.h \ + include/row0upd.ic include/row0vers.h \ + include/row0vers.ic include/srv0que.h \ + include/srv0srv.h include/srv0srv.ic \ + include/srv0start.h include/sync0arr.h \ + include/sync0arr.ic include/sync0rw.h \ + include/sync0rw.ic include/sync0sync.h \ + include/sync0sync.ic include/sync0types.h \ + include/thr0loc.h include/thr0loc.ic \ + include/trx0purge.h include/trx0purge.ic \ + include/trx0rec.h include/trx0rec.ic \ + include/trx0roll.h include/trx0roll.ic \ + include/trx0rseg.h include/trx0rseg.ic \ + include/trx0sys.h include/trx0sys.ic \ + include/trx0trx.h include/trx0trx.ic \ + include/trx0types.h include/trx0undo.h \ + include/trx0undo.ic include/trx0xa.h \ + include/univ.i include/usr0sess.h \ + include/usr0sess.ic include/usr0types.h \ + include/ut0byte.h include/ut0byte.ic \ + include/ut0dbg.h include/ut0lst.h \ + include/ut0mem.h include/ut0mem.ic \ + include/ut0rnd.h include/ut0rnd.ic \ + include/ut0sort.h include/ut0ut.h \ + include/ut0ut.ic include/ut0vec.h \ + include/ut0vec.ic include/ut0list.h \ + include/ut0list.ic include/ut0wqueue.h \ + include/ha_prototypes.h handler/ha_innodb.h -EXTRA_DIST = include/btr0btr.h include/btr0btr.ic include/btr0cur.h include/btr0cur.ic \ - include/btr0pcur.h include/btr0pcur.ic include/btr0sea.h include/btr0sea.ic \ - include/btr0types.h \ - include/buf0buf.h include/buf0buf.ic include/buf0flu.h include/buf0flu.ic \ - include/buf0lru.h include/buf0lru.ic include/buf0rea.h include/buf0types.h \ - include/data0data.h include/data0data.ic include/data0type.h include/data0type.ic \ - include/data0types.h include/db0err.h \ - include/dict0boot.h include/dict0boot.ic include/dict0crea.h include/dict0crea.ic \ - include/dict0dict.h include/dict0dict.ic include/dict0load.h include/dict0load.ic \ - include/dict0mem.h include/dict0mem.ic include/dict0types.h \ - include/dyn0dyn.h include/dyn0dyn.ic \ - include/eval0eval.h include/eval0eval.ic include/eval0proc.h include/eval0proc.ic \ - include/fil0fil.h include/fsp0fsp.h include/fsp0fsp.ic \ - include/fut0fut.h include/fut0fut.ic include/fut0lst.h include/fut0lst.ic \ - include/ha0ha.h include/ha0ha.ic include/hash0hash.h include/hash0hash.ic \ - include/ibuf0ibuf.h include/ibuf0ibuf.ic include/ibuf0types.h \ - include/lock0lock.h include/lock0lock.ic include/lock0types.h \ - include/log0log.h include/log0log.ic include/log0recv.h include/log0recv.ic \ - include/mach0data.h include/mach0data.ic include/mem0dbg.h include/mem0dbg.ic \ - include/mem0mem.h include/mem0mem.ic include/mem0pool.h include/mem0pool.ic \ - include/mtr0log.h include/mtr0log.ic include/mtr0mtr.h include/mtr0mtr.ic \ - include/mtr0types.h include/os0file.h \ - include/os0proc.h include/os0proc.ic include/os0sync.h include/os0sync.ic \ - include/os0thread.h include/os0thread.ic \ - include/page0cur.h include/page0cur.ic include/page0page.h include/page0page.ic \ - include/page0types.h \ - include/pars0grm.h include/pars0opt.h 
include/pars0opt.ic \ - include/pars0pars.h include/pars0pars.ic include/pars0sym.h include/pars0sym.ic \ - include/pars0types.h \ - include/que0que.h include/que0que.ic include/que0types.h \ - include/read0read.h include/read0read.ic include/read0types.h \ - include/rem0cmp.h include/rem0cmp.ic include/rem0rec.h include/rem0rec.ic \ - include/rem0types.h \ - include/row0ins.h include/row0ins.ic include/row0mysql.h include/row0mysql.ic \ - include/row0purge.h include/row0purge.ic include/row0row.h include/row0row.ic \ - include/row0sel.h include/row0sel.ic include/row0types.h \ - include/row0uins.h include/row0uins.ic include/row0umod.h include/row0umod.ic \ - include/row0undo.h include/row0undo.ic include/row0upd.h include/row0upd.ic \ - include/row0vers.h include/row0vers.ic \ - include/srv0que.h include/srv0srv.h include/srv0srv.ic include/srv0start.h \ - include/sync0arr.h include/sync0arr.ic include/sync0rw.h include/sync0rw.ic \ - include/sync0sync.h include/sync0sync.ic include/sync0types.h \ - include/thr0loc.h include/thr0loc.ic \ - include/trx0purge.h include/trx0purge.ic include/trx0rec.h include/trx0rec.ic \ - include/trx0roll.h include/trx0roll.ic include/trx0rseg.h include/trx0rseg.ic \ - include/trx0sys.h include/trx0sys.ic include/trx0trx.h include/trx0trx.ic \ - include/trx0types.h include/trx0undo.h include/trx0undo.ic include/trx0xa.h \ - include/univ.i include/usr0sess.h include/usr0sess.ic include/usr0types.h \ - include/ut0byte.h include/ut0byte.ic include/ut0dbg.h include/ut0lst.h \ - include/ut0mem.h include/ut0mem.ic include/ut0rnd.h include/ut0rnd.ic \ - handler/ha_innodb.h \ - include/ut0sort.h include/ut0ut.h include/ut0ut.ic include/ut0vec.h include/ut0vec.ic include/ha_prototypes.h \ - include/ut0list.h include/ut0list.ic \ - include/ut0wqueue.h \ - CMakeLists.txt plug.in +EXTRA_LIBRARIES = libinnobase.a +noinst_LIBRARIES = @plugin_innobase_static_target@ +libinnobase_a_SOURCES = btr/btr0btr.c btr/btr0cur.c btr/btr0pcur.c \ + btr/btr0sea.c buf/buf0buf.c buf/buf0flu.c \ + buf/buf0lru.c buf/buf0rea.c data/data0data.c \ + data/data0type.c dict/dict0boot.c \ + dict/dict0crea.c dict/dict0dict.c \ + dict/dict0load.c dict/dict0mem.c dyn/dyn0dyn.c \ + eval/eval0eval.c eval/eval0proc.c \ + fil/fil0fil.c fsp/fsp0fsp.c fut/fut0fut.c \ + fut/fut0lst.c ha/ha0ha.c ha/hash0hash.c \ + ibuf/ibuf0ibuf.c lock/lock0iter.c \ + lock/lock0lock.c \ + log/log0log.c log/log0recv.c mach/mach0data.c \ + mem/mem0mem.c mem/mem0pool.c mtr/mtr0log.c \ + mtr/mtr0mtr.c os/os0file.c os/os0proc.c \ + os/os0sync.c os/os0thread.c page/page0cur.c \ + page/page0page.c pars/lexyy.c pars/pars0grm.c \ + pars/pars0opt.c pars/pars0pars.c \ + pars/pars0sym.c que/que0que.c read/read0read.c \ + rem/rem0cmp.c rem/rem0rec.c row/row0ins.c \ + row/row0mysql.c row/row0purge.c row/row0row.c \ + row/row0sel.c row/row0uins.c row/row0umod.c \ + row/row0undo.c row/row0upd.c row/row0vers.c \ + srv/srv0que.c srv/srv0srv.c srv/srv0start.c \ + sync/sync0arr.c sync/sync0rw.c \ + sync/sync0sync.c thr/thr0loc.c trx/trx0purge.c \ + trx/trx0rec.c trx/trx0roll.c trx/trx0rseg.c \ + trx/trx0sys.c trx/trx0trx.c trx/trx0undo.c \ + usr/usr0sess.c ut/ut0byte.c ut/ut0dbg.c \ + ut/ut0list.c ut/ut0mem.c ut/ut0rnd.c \ + ut/ut0ut.c ut/ut0vec.c ut/ut0wqueue.c \ + handler/ha_innodb.cc -noinst_LIBRARIES = libinnobase.a -libinnobase_a_LIBADD = usr/libusr.a srv/libsrv.a dict/libdict.a \ - que/libque.a srv/libsrv.a ibuf/libibuf.a \ - row/librow.a pars/libpars.a btr/libbtr.a \ - trx/libtrx.a read/libread.a usr/libusr.a \ - buf/libbuf.a 
ibuf/libibuf.a eval/libeval.a \ - log/liblog.a fsp/libfsp.a fut/libfut.a \ - fil/libfil.a lock/liblock.a mtr/libmtr.a \ - page/libpage.a rem/librem.a thr/libthr.a \ - sync/libsync.a data/libdata.a mach/libmach.a \ - ha/libha.a dyn/libdyn.a mem/libmem.a \ - handler/libhandler.a \ - ut/libut.a os/libos.a ut/libut.a -libinnobase_a_SOURCES = +libinnobase_a_CXXFLAGS= $(AM_CFLAGS) +libinnobase_a_CFLAGS = $(AM_CFLAGS) +EXTRA_LTLIBRARIES = ha_innodb.la +pkglib_LTLIBRARIES = @plugin_innobase_shared_target@ -libinnobase.a: $(libinnobase_a_LIBADD) - -rm -f $@ - if test "$(host_os)" = "netware" ; \ - then \ - $(libinnobase_a_AR) $@ $(libinnobase_a_LIBADD) ; \ - else \ - for arc in $(libinnobase_a_LIBADD); do \ - arpath=`echo $$arc|sed 's|[^/]*$$||'`; \ - $(AR) t $$arc|sed "s|^|$$arpath|"; \ - done | sort -u | xargs $(AR) cq $@ ; \ - $(RANLIB) $@ ; \ - fi +ha_innodb_la_LDFLAGS = -module -rpath $(MYSQLLIBdir) +ha_innodb_la_CXXFLAGS= $(AM_CFLAGS) -DMYSQL_DYNAMIC_PLUGIN +ha_innodb_la_CFLAGS = $(AM_CFLAGS) -DMYSQL_DYNAMIC_PLUGIN +ha_innodb_la_SOURCES = $(libinnobase_a_SOURCES) + +EXTRA_DIST = CMakeLists.txt plug.in \ + pars/make_bison.sh pars/make_flex.sh \ + pars/pars0grm.y pars/pars0lex.l # Don't update the files from bitkeeper %::SCCS/s.% diff --git a/storage/innobase/btr/Makefile.am b/storage/innobase/btr/Makefile.am deleted file mode 100644 index 6b09b289cdc..00000000000 --- a/storage/innobase/btr/Makefile.am +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (C) 2001, 2003 MySQL AB & Innobase Oy -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -include ../include/Makefile.i - -noinst_LIBRARIES = libbtr.a - -libbtr_a_SOURCES = btr0btr.c btr0cur.c btr0pcur.c btr0sea.c - -EXTRA_PROGRAMS = - -# Don't update the files from bitkeeper -%::SCCS/s.% diff --git a/storage/innobase/btr/btr0btr.c b/storage/innobase/btr/btr0btr.c index 50a349e78d6..6e8b43aeb8d 100644 --- a/storage/innobase/btr/btr0btr.c +++ b/storage/innobase/btr/btr0btr.c @@ -2606,8 +2606,11 @@ btr_index_rec_validate( rec_get_nth_field(rec, offsets, i, &len); - /* Note that prefix indexes are not fixed size even when - their type is CHAR. */ + /* Note that if fixed_size != 0, it equals the + length of a fixed-size column in the clustered index. + A prefix index of the column is of fixed, but different + length. When fixed_size == 0, prefix_len is the maximum + length of the prefix index column. */ if ((dict_index_get_nth_field(index, i)->prefix_len == 0 && len != UNIV_SQL_NULL && fixed_size diff --git a/storage/innobase/buf/Makefile.am b/storage/innobase/buf/Makefile.am deleted file mode 100644 index 946d5a2e5c2..00000000000 --- a/storage/innobase/buf/Makefile.am +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (C) 2001, 2003 MySQL AB & Innobase Oy -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. 
-# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -include ../include/Makefile.i - -noinst_LIBRARIES = libbuf.a - -libbuf_a_SOURCES = buf0buf.c buf0flu.c buf0lru.c buf0rea.c - -EXTRA_PROGRAMS = - -# Don't update the files from bitkeeper -%::SCCS/s.% diff --git a/storage/innobase/buf/buf0buf.c b/storage/innobase/buf/buf0buf.c index ad775fbd6d5..469d3ac05d7 100644 --- a/storage/innobase/buf/buf0buf.c +++ b/storage/innobase/buf/buf0buf.c @@ -802,9 +802,7 @@ buf_awe_map_page_to_frame( { buf_block_t* bck; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(buf_pool->mutex))); -#endif /* UNIV_SYNC_DEBUG */ ut_ad(block); if (block->frame) { @@ -900,15 +898,12 @@ buf_block_make_young( /*=================*/ buf_block_t* block) /* in: block to make younger */ { -#ifdef UNIV_SYNC_DEBUG ut_ad(!mutex_own(&(buf_pool->mutex))); -#endif /* UNIV_SYNC_DEBUG */ /* Note that we read freed_page_clock's without holding any mutex: this is allowed since the result is used only in heuristics */ - if (buf_pool->freed_page_clock >= block->freed_page_clock - + 1 + (buf_pool->curr_size / 4)) { + if (buf_block_peek_if_too_old(block)) { mutex_enter(&buf_pool->mutex); /* There has been freeing activity in the LRU list: @@ -1635,10 +1630,9 @@ buf_page_init( in units of a page */ buf_block_t* block) /* in: block to init */ { -#ifdef UNIV_SYNC_DEBUG + ut_ad(mutex_own(&(buf_pool->mutex))); ut_ad(mutex_own(&(block->mutex))); -#endif /* UNIV_SYNC_DEBUG */ ut_a(block->state != BUF_BLOCK_FILE_PAGE); /* Set the state of the block */ @@ -1653,6 +1647,15 @@ buf_page_init( block->lock_hash_val = lock_rec_hash(space, offset); +#ifdef UNIV_DEBUG_VALGRIND + if (!space) { + /* Silence valid Valgrind warnings about uninitialized + data being written to data files. There are some unused + bytes on some pages that InnoDB does not initialize. 
*/ + UNIV_MEM_VALID(block->frame, UNIV_PAGE_SIZE); + } +#endif /* UNIV_DEBUG_VALGRIND */ + /* Insert into the hash table of file pages */ if (buf_page_hash_get(space, offset)) { diff --git a/storage/innobase/buf/buf0flu.c b/storage/innobase/buf/buf0flu.c index 64060dab8ae..423c08c0569 100644 --- a/storage/innobase/buf/buf0flu.c +++ b/storage/innobase/buf/buf0flu.c @@ -48,10 +48,7 @@ buf_flush_insert_into_flush_list( /*=============================*/ buf_block_t* block) /* in: block which is modified */ { -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(buf_pool->mutex))); -#endif /* UNIV_SYNC_DEBUG */ - ut_a(block->state == BUF_BLOCK_FILE_PAGE); ut_ad((UT_LIST_GET_FIRST(buf_pool->flush_list) == NULL) @@ -77,9 +74,7 @@ buf_flush_insert_sorted_into_flush_list( buf_block_t* prev_b; buf_block_t* b; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(buf_pool->mutex))); -#endif /* UNIV_SYNC_DEBUG */ prev_b = NULL; b = UT_LIST_GET_FIRST(buf_pool->flush_list); @@ -111,10 +106,8 @@ buf_flush_ready_for_replace( buf_block_t* block) /* in: buffer control block, must be in state BUF_BLOCK_FILE_PAGE and in the LRU list */ { -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(buf_pool->mutex))); ut_ad(mutex_own(&block->mutex)); -#endif /* UNIV_SYNC_DEBUG */ if (block->state != BUF_BLOCK_FILE_PAGE) { ut_print_timestamp(stderr); fprintf(stderr, @@ -147,10 +140,8 @@ buf_flush_ready_for_flush( BUF_BLOCK_FILE_PAGE */ ulint flush_type)/* in: BUF_FLUSH_LRU or BUF_FLUSH_LIST */ { -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(buf_pool->mutex))); ut_ad(mutex_own(&(block->mutex))); -#endif /* UNIV_SYNC_DEBUG */ ut_a(block->state == BUF_BLOCK_FILE_PAGE); if ((ut_dulint_cmp(block->oldest_modification, ut_dulint_zero) > 0) @@ -977,8 +968,7 @@ buf_flush_batch( } #endif /* UNIV_DEBUG */ - if (page_count != ULINT_UNDEFINED) - srv_buf_pool_flushed+= page_count; + srv_buf_pool_flushed += page_count; return(page_count); } diff --git a/storage/innobase/buf/buf0lru.c b/storage/innobase/buf/buf0lru.c index 377552ece6c..7b49a7641af 100644 --- a/storage/innobase/buf/buf0lru.c +++ b/storage/innobase/buf/buf0lru.c @@ -244,7 +244,15 @@ buf_LRU_search_and_free_block( frame at all */ if (block->frame) { + /* The page was declared uninitialized + by buf_LRU_block_remove_hashed_page(). + We need to flag the contents of the + page valid (which it still is) in + order to avoid bogus Valgrind + warnings. 
*/ + UNIV_MEM_VALID(block->frame, UNIV_PAGE_SIZE); btr_search_drop_page_hash_index(block->frame); + UNIV_MEM_INVALID(block->frame, UNIV_PAGE_SIZE); } ut_a(block->buf_fix_count == 0); @@ -449,6 +457,7 @@ loop: mutex_enter(&block->mutex); block->state = BUF_BLOCK_READY_FOR_USE; + UNIV_MEM_ALLOC(block->frame, UNIV_PAGE_SIZE); mutex_exit(&block->mutex); @@ -549,9 +558,7 @@ buf_LRU_old_adjust_len(void) ulint new_len; ut_a(buf_pool->LRU_old); -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(buf_pool->mutex))); -#endif /* UNIV_SYNC_DEBUG */ ut_ad(3 * (BUF_LRU_OLD_MIN_LEN / 8) > BUF_LRU_OLD_TOLERANCE + 5); for (;;) { @@ -593,6 +600,7 @@ buf_LRU_old_init(void) { buf_block_t* block; + ut_ad(mutex_own(&(buf_pool->mutex))); ut_a(UT_LIST_GET_LEN(buf_pool->LRU) == BUF_LRU_OLD_MIN_LEN); /* We first initialize all blocks in the LRU list as old and then use @@ -624,9 +632,7 @@ buf_LRU_remove_block( { ut_ad(buf_pool); ut_ad(block); -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(buf_pool->mutex))); -#endif /* UNIV_SYNC_DEBUG */ ut_a(block->state == BUF_BLOCK_FILE_PAGE); ut_a(block->in_LRU_list); @@ -690,9 +696,7 @@ buf_LRU_add_block_to_end_low( ut_ad(buf_pool); ut_ad(block); -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(buf_pool->mutex))); -#endif /* UNIV_SYNC_DEBUG */ ut_a(block->state == BUF_BLOCK_FILE_PAGE); @@ -755,9 +759,7 @@ buf_LRU_add_block_low( ut_ad(buf_pool); ut_ad(block); -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(buf_pool->mutex))); -#endif /* UNIV_SYNC_DEBUG */ ut_a(block->state == BUF_BLOCK_FILE_PAGE); ut_a(!block->in_LRU_list); @@ -858,10 +860,9 @@ buf_LRU_block_free_non_file_page( /*=============================*/ buf_block_t* block) /* in: block, must not contain a file page */ { -#ifdef UNIV_SYNC_DEBUG + ut_ad(mutex_own(&(buf_pool->mutex))); ut_ad(mutex_own(&block->mutex)); -#endif /* UNIV_SYNC_DEBUG */ ut_ad(block); ut_a((block->state == BUF_BLOCK_MEMORY) @@ -872,6 +873,7 @@ buf_LRU_block_free_non_file_page( block->state = BUF_BLOCK_NOT_USED; + UNIV_MEM_ALLOC(block->frame, UNIV_PAGE_SIZE); #ifdef UNIV_DEBUG /* Wipe contents of page to reveal possible stale pointers to it */ memset(block->frame, '\0', UNIV_PAGE_SIZE); @@ -879,6 +881,8 @@ buf_LRU_block_free_non_file_page( UT_LIST_ADD_FIRST(free, buf_pool->free, block); block->in_free_list = TRUE; + UNIV_MEM_FREE(block->frame, UNIV_PAGE_SIZE); + if (srv_use_awe && block->frame) { /* Add to the list of mapped pages */ @@ -898,10 +902,8 @@ buf_LRU_block_remove_hashed_page( be in a state where it can be freed; there may or may not be a hash index to the page */ { -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(buf_pool->mutex))); ut_ad(mutex_own(&block->mutex)); -#endif /* UNIV_SYNC_DEBUG */ ut_ad(block); ut_a(block->state == BUF_BLOCK_FILE_PAGE); @@ -949,6 +951,7 @@ buf_LRU_block_remove_hashed_page( buf_page_address_fold(block->space, block->offset), block); + UNIV_MEM_INVALID(block->frame, UNIV_PAGE_SIZE); block->state = BUF_BLOCK_REMOVE_HASH; } @@ -961,10 +964,9 @@ buf_LRU_block_free_hashed_page( buf_block_t* block) /* in: block, must contain a file page and be in a state where it can be freed */ { -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(buf_pool->mutex))); ut_ad(mutex_own(&block->mutex)); -#endif /* UNIV_SYNC_DEBUG */ + ut_a(block->state == BUF_BLOCK_REMOVE_HASH); block->state = BUF_BLOCK_MEMORY; diff --git a/storage/innobase/data/Makefile.am b/storage/innobase/data/Makefile.am deleted file mode 100644 index 6f9407d40e5..00000000000 --- a/storage/innobase/data/Makefile.am +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (C) 2001, 2003 MySQL AB & Innobase Oy -# -# 
This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -include ../include/Makefile.i - -noinst_LIBRARIES = libdata.a - -libdata_a_SOURCES = data0data.c data0type.c - -EXTRA_PROGRAMS = - -# Don't update the files from bitkeeper -%::SCCS/s.% diff --git a/storage/innobase/data/data0data.c b/storage/innobase/data/data0data.c index fc4494d991a..0f03de4ca9d 100644 --- a/storage/innobase/data/data0data.c +++ b/storage/innobase/data/data0data.c @@ -18,6 +18,8 @@ Created 5/30/1994 Heikki Tuuri #include "dict0dict.h" #include "btr0cur.h" +#include <ctype.h> + #ifdef UNIV_DEBUG byte data_error; /* data pointers of tuple fields are initialized to point here for error checking */ diff --git a/storage/innobase/data/data0type.c b/storage/innobase/data/data0type.c index 77779d185cf..305000d7c0a 100644 --- a/storage/innobase/data/data0type.c +++ b/storage/innobase/data/data0type.c @@ -190,7 +190,8 @@ dtype_validate( dtype_t* type) /* in: type struct to validate */ { ut_a(type); - ut_a((type->mtype >= DATA_VARCHAR) && (type->mtype <= DATA_MYSQL)); + ut_a(type->mtype >= DATA_VARCHAR); + ut_a(type->mtype <= DATA_MYSQL); if (type->mtype == DATA_SYS) { ut_a((type->prtype & DATA_MYSQL_TYPE_MASK) < DATA_N_SYS_COLS); diff --git a/storage/innobase/dict/Makefile.am b/storage/innobase/dict/Makefile.am deleted file mode 100644 index 15cacca6f58..00000000000 --- a/storage/innobase/dict/Makefile.am +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright (C) 2001, 2003 MySQL AB & Innobase Oy -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -include ../include/Makefile.i - -noinst_LIBRARIES = libdict.a - -libdict_a_SOURCES = dict0boot.c dict0crea.c dict0dict.c dict0load.c\ - dict0mem.c - -EXTRA_PROGRAMS = - -# Don't update the files from bitkeeper -%::SCCS/s.% diff --git a/storage/innobase/dict/dict0boot.c b/storage/innobase/dict/dict0boot.c index 08515d8fb13..5f9aaf71e18 100644 --- a/storage/innobase/dict/dict0boot.c +++ b/storage/innobase/dict/dict0boot.c @@ -86,9 +86,7 @@ dict_hdr_flush_row_id(void) dulint id; mtr_t mtr; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(dict_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ id = dict_sys->row_id; @@ -213,6 +211,7 @@ dict_boot(void) dict_table_t* table; dict_index_t* index; dict_hdr_t* dict_hdr; + mem_heap_t* heap; mtr_t mtr; mtr_start(&mtr); @@ -220,6 +219,8 @@ dict_boot(void) /* Create the hash tables etc. */ dict_init(); + heap = mem_heap_create(450); + mutex_enter(&(dict_sys->mutex)); /* Get the dictionary header */ @@ -246,19 +247,20 @@ dict_boot(void) /*-------------------------*/ table = dict_mem_table_create("SYS_TABLES", DICT_HDR_SPACE, 8, 0); - dict_mem_table_add_col(table, "NAME", DATA_BINARY, 0, 0); - dict_mem_table_add_col(table, "ID", DATA_BINARY, 0, 0); - dict_mem_table_add_col(table, "N_COLS", DATA_INT, 0, 4); - dict_mem_table_add_col(table, "TYPE", DATA_INT, 0, 4); - dict_mem_table_add_col(table, "MIX_ID", DATA_BINARY, 0, 0); - dict_mem_table_add_col(table, "MIX_LEN", DATA_INT, 0, 4); - dict_mem_table_add_col(table, "CLUSTER_NAME", DATA_BINARY, 0, 0); - dict_mem_table_add_col(table, "SPACE", DATA_INT, 0, 4); + dict_mem_table_add_col(table, heap, "NAME", DATA_BINARY, 0, 0); + dict_mem_table_add_col(table, heap, "ID", DATA_BINARY, 0, 0); + dict_mem_table_add_col(table, heap, "N_COLS", DATA_INT, 0, 4); + dict_mem_table_add_col(table, heap, "TYPE", DATA_INT, 0, 4); + dict_mem_table_add_col(table, heap, "MIX_ID", DATA_BINARY, 0, 0); + dict_mem_table_add_col(table, heap, "MIX_LEN", DATA_INT, 0, 4); + dict_mem_table_add_col(table, heap, "CLUSTER_NAME", DATA_BINARY, 0, 0); + dict_mem_table_add_col(table, heap, "SPACE", DATA_INT, 0, 4); table->id = DICT_TABLES_ID; - dict_table_add_to_cache(table); + dict_table_add_to_cache(table, heap); dict_sys->sys_tables = table; + mem_heap_empty(heap); index = dict_mem_index_create("SYS_TABLES", "CLUST_IND", DICT_HDR_SPACE, @@ -285,18 +287,19 @@ dict_boot(void) /*-------------------------*/ table = dict_mem_table_create("SYS_COLUMNS", DICT_HDR_SPACE, 7, 0); - dict_mem_table_add_col(table, "TABLE_ID", DATA_BINARY, 0, 0); - dict_mem_table_add_col(table, "POS", DATA_INT, 0, 4); - dict_mem_table_add_col(table, "NAME", DATA_BINARY, 0, 0); - dict_mem_table_add_col(table, "MTYPE", DATA_INT, 0, 4); - dict_mem_table_add_col(table, "PRTYPE", DATA_INT, 0, 4); - dict_mem_table_add_col(table, "LEN", DATA_INT, 0, 4); - dict_mem_table_add_col(table, "PREC", DATA_INT, 0, 4); + dict_mem_table_add_col(table, heap, "TABLE_ID", DATA_BINARY, 0, 0); + dict_mem_table_add_col(table, heap, "POS", DATA_INT, 0, 4); + dict_mem_table_add_col(table, heap, "NAME", DATA_BINARY, 0, 0); + dict_mem_table_add_col(table, heap, "MTYPE", DATA_INT, 0, 4); + dict_mem_table_add_col(table, heap, "PRTYPE", DATA_INT, 0, 4); + dict_mem_table_add_col(table, heap, "LEN", DATA_INT, 0, 4); + dict_mem_table_add_col(table, heap, "PREC", DATA_INT, 0, 4); table->id = DICT_COLUMNS_ID; - 
dict_table_add_to_cache(table); + dict_table_add_to_cache(table, heap); dict_sys->sys_columns = table; + mem_heap_empty(heap); index = dict_mem_index_create("SYS_COLUMNS", "CLUST_IND", DICT_HDR_SPACE, @@ -313,13 +316,13 @@ dict_boot(void) /*-------------------------*/ table = dict_mem_table_create("SYS_INDEXES", DICT_HDR_SPACE, 7, 0); - dict_mem_table_add_col(table, "TABLE_ID", DATA_BINARY, 0, 0); - dict_mem_table_add_col(table, "ID", DATA_BINARY, 0, 0); - dict_mem_table_add_col(table, "NAME", DATA_BINARY, 0, 0); - dict_mem_table_add_col(table, "N_FIELDS", DATA_INT, 0, 4); - dict_mem_table_add_col(table, "TYPE", DATA_INT, 0, 4); - dict_mem_table_add_col(table, "SPACE", DATA_INT, 0, 4); - dict_mem_table_add_col(table, "PAGE_NO", DATA_INT, 0, 4); + dict_mem_table_add_col(table, heap, "TABLE_ID", DATA_BINARY, 0, 0); + dict_mem_table_add_col(table, heap, "ID", DATA_BINARY, 0, 0); + dict_mem_table_add_col(table, heap, "NAME", DATA_BINARY, 0, 0); + dict_mem_table_add_col(table, heap, "N_FIELDS", DATA_INT, 0, 4); + dict_mem_table_add_col(table, heap, "TYPE", DATA_INT, 0, 4); + dict_mem_table_add_col(table, heap, "SPACE", DATA_INT, 0, 4); + dict_mem_table_add_col(table, heap, "PAGE_NO", DATA_INT, 0, 4); /* The '+ 2' below comes from the 2 system fields */ #if DICT_SYS_INDEXES_PAGE_NO_FIELD != 6 + 2 @@ -333,8 +336,9 @@ dict_boot(void) #endif table->id = DICT_INDEXES_ID; - dict_table_add_to_cache(table); + dict_table_add_to_cache(table, heap); dict_sys->sys_indexes = table; + mem_heap_empty(heap); index = dict_mem_index_create("SYS_INDEXES", "CLUST_IND", DICT_HDR_SPACE, @@ -351,13 +355,14 @@ dict_boot(void) /*-------------------------*/ table = dict_mem_table_create("SYS_FIELDS", DICT_HDR_SPACE, 3, 0); - dict_mem_table_add_col(table, "INDEX_ID", DATA_BINARY, 0, 0); - dict_mem_table_add_col(table, "POS", DATA_INT, 0, 4); - dict_mem_table_add_col(table, "COL_NAME", DATA_BINARY, 0, 0); + dict_mem_table_add_col(table, heap, "INDEX_ID", DATA_BINARY, 0, 0); + dict_mem_table_add_col(table, heap, "POS", DATA_INT, 0, 4); + dict_mem_table_add_col(table, heap, "COL_NAME", DATA_BINARY, 0, 0); table->id = DICT_FIELDS_ID; - dict_table_add_to_cache(table); + dict_table_add_to_cache(table, heap); dict_sys->sys_fields = table; + mem_heap_free(heap); index = dict_mem_index_create("SYS_FIELDS", "CLUST_IND", DICT_HDR_SPACE, diff --git a/storage/innobase/dict/dict0crea.c b/storage/innobase/dict/dict0crea.c index 33e328d1e0b..4116230347d 100644 --- a/storage/innobase/dict/dict0crea.c +++ b/storage/innobase/dict/dict0crea.c @@ -212,9 +212,7 @@ dict_build_table_def_step( ulint i; ulint row_len; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(dict_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ table = node->table; @@ -255,7 +253,7 @@ dict_build_table_def_step( error = fil_create_new_single_table_tablespace( &space, path_or_name, is_path, FIL_IBD_FILE_INITIAL_SIZE); - table->space = space; + table->space = (unsigned int) space; if (error != DB_SUCCESS) { @@ -312,9 +310,7 @@ dict_create_sys_indexes_tuple( dfield_t* dfield; byte* ptr; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(dict_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ ut_ad(index && heap); sys_indexes = dict_sys->sys_indexes; @@ -512,9 +508,7 @@ dict_build_index_def_step( dtuple_t* row; trx_t* trx; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(dict_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ trx = thr_get_trx(thr); @@ -585,9 +579,7 @@ dict_create_index_tree_step( btr_pcur_t pcur; mtr_t mtr; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(dict_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ 
index = node->index; table = node->table; @@ -642,10 +634,7 @@ dict_drop_index_tree( byte* ptr; ulint len; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(dict_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ - ut_a(!dict_table_is_comp(dict_sys->sys_indexes)); ptr = rec_get_nth_field_old(rec, DICT_SYS_INDEXES_PAGE_NO_FIELD, &len); @@ -718,10 +707,7 @@ dict_truncate_index_tree( ulint comp; dict_index_t* index; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(dict_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ - ut_a(!dict_table_is_comp(dict_sys->sys_indexes)); rec = btr_pcur_get_rec(pcur); ptr = rec_get_nth_field_old(rec, DICT_SYS_INDEXES_PAGE_NO_FIELD, &len); @@ -806,7 +792,7 @@ dict_truncate_index_tree( root_page_no = btr_create(type, space, index_id, comp, mtr); if (index) { - index->page = root_page_no; + index->page = (unsigned int) root_page_no; } else { ut_print_timestamp(stderr); fprintf(stderr, @@ -907,9 +893,7 @@ dict_create_table_step( trx_t* trx; ut_ad(thr); -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(dict_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ trx = thr_get_trx(thr); @@ -976,7 +960,7 @@ dict_create_table_step( if (node->state == TABLE_ADD_TO_CACHE) { - dict_table_add_to_cache(node->table); + dict_table_add_to_cache(node->table, node->heap); err = DB_SUCCESS; } @@ -1016,9 +1000,7 @@ dict_create_index_step( trx_t* trx; ut_ad(thr); -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(dict_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ trx = thr_get_trx(thr); @@ -1213,7 +1195,8 @@ dict_create_or_check_foreign_constraint_tables(void) fprintf(stderr, "InnoDB: error %lu in creation\n", (ulong) error); - ut_a(error == DB_OUT_OF_FILE_SPACE); + ut_a(error == DB_OUT_OF_FILE_SPACE + || error == DB_TOO_MANY_CONCURRENT_TRXS); fprintf(stderr, "InnoDB: creation failed\n" @@ -1440,9 +1423,7 @@ dict_create_add_foreigns_to_dictionary( ulint number = start_id + 1; ulint error; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(dict_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ if (NULL == dict_table_get_low("SYS_FOREIGN")) { fprintf(stderr, diff --git a/storage/innobase/dict/dict0dict.c b/storage/innobase/dict/dict0dict.c index 2b3cfdba27d..595dfb06ee5 100644 --- a/storage/innobase/dict/dict0dict.c +++ b/storage/innobase/dict/dict0dict.c @@ -30,6 +30,8 @@ Created 1/8/1996 Heikki Tuuri # include "m_ctype.h" /* my_isspace() */ #endif /* !UNIV_HOTBACKUP */ +#include <ctype.h> + dict_sys_t* dict_sys = NULL; /* the dictionary system */ rw_lock_t dict_operation_lock; /* table create, drop, etc. reserve @@ -408,14 +410,27 @@ dict_table_get_col_name( ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); s = table->col_names; - - for (i = 0; i < col_nr; i++) { - s += strlen(s) + 1; + if (s) { + for (i = 0; i < col_nr; i++) { + s += strlen(s) + 1; + } } return(s); } + +/************************************************************************ +Acquire the autoinc lock.*/ + +void +dict_table_autoinc_lock( +/*====================*/ + dict_table_t* table) +{ + mutex_enter(&table->autoinc_mutex); +} + /************************************************************************ Initializes the autoinc counter. It is not an error to initialize an already initialized counter. 
*/ @@ -426,54 +441,8 @@ dict_table_autoinc_initialize( dict_table_t* table, /* in: table */ ib_longlong value) /* in: next value to assign to a row */ { - mutex_enter(&(table->autoinc_mutex)); - table->autoinc_inited = TRUE; table->autoinc = value; - - mutex_exit(&(table->autoinc_mutex)); -} - -/************************************************************************ -Gets the next autoinc value (== autoinc counter value), 0 if not yet -initialized. If initialized, increments the counter by 1. */ - -ib_longlong -dict_table_autoinc_get( -/*===================*/ - /* out: value for a new row, or 0 */ - dict_table_t* table) /* in: table */ -{ - ib_longlong value; - - mutex_enter(&(table->autoinc_mutex)); - - if (!table->autoinc_inited) { - - value = 0; - } else { - value = table->autoinc; - table->autoinc = table->autoinc + 1; - } - - mutex_exit(&(table->autoinc_mutex)); - - return(value); -} - -/************************************************************************ -Decrements the autoinc counter value by 1. */ - -void -dict_table_autoinc_decrement( -/*=========================*/ - dict_table_t* table) /* in: table */ -{ - mutex_enter(&(table->autoinc_mutex)); - - table->autoinc = table->autoinc - 1; - - mutex_exit(&(table->autoinc_mutex)); } /************************************************************************ @@ -488,8 +457,6 @@ dict_table_autoinc_read( { ib_longlong value; - mutex_enter(&(table->autoinc_mutex)); - if (!table->autoinc_inited) { value = 0; @@ -497,35 +464,11 @@ dict_table_autoinc_read( value = table->autoinc; } - mutex_exit(&(table->autoinc_mutex)); - return(value); } /************************************************************************ -Peeks the autoinc counter value, 0 if not yet initialized. Does not -increment the counter. The read not protected by any mutex! */ - -ib_longlong -dict_table_autoinc_peek( -/*====================*/ - /* out: value of the counter */ - dict_table_t* table) /* in: table */ -{ - ib_longlong value; - - if (!table->autoinc_inited) { - - value = 0; - } else { - value = table->autoinc; - } - - return(value); -} - -/************************************************************************ -Updates the autoinc counter if the value supplied is equal or bigger than the +Updates the autoinc counter if the value supplied is greater than the current value. If not inited, does nothing. */ void @@ -535,15 +478,21 @@ dict_table_autoinc_update( dict_table_t* table, /* in: table */ ib_longlong value) /* in: value which was assigned to a row */ { - mutex_enter(&(table->autoinc_mutex)); + if (table->autoinc_inited && value > table->autoinc) { - if (table->autoinc_inited) { - if (value >= table->autoinc) { - table->autoinc = value + 1; - } + table->autoinc = value; } +} - mutex_exit(&(table->autoinc_mutex)); +/************************************************************************ +Release the autoinc lock.*/ + +void +dict_table_autoinc_unlock( +/*======================*/ + dict_table_t* table) /* in: release autoinc lock for this table */ +{ + mutex_exit(&table->autoinc_mutex); } /************************************************************************ @@ -689,9 +638,8 @@ dict_table_get_on_id( if we are doing a rollback to handle an error in TABLE CREATE, for example, we already have the mutex! 
*/ -#ifdef UNIV_SYNC_DEBUG - ut_ad(mutex_own(&(dict_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ + ut_ad(mutex_own(&(dict_sys->mutex)) + || trx->dict_operation_lock_mode == RW_X_LATCH); return(dict_table_get_on_id_low(table_id)); } @@ -841,30 +789,18 @@ dict_table_get( } /************************************************************************** -Adds a table object to the dictionary cache. */ +Adds system columns to a table object. */ void -dict_table_add_to_cache( -/*====================*/ - dict_table_t* table) /* in: table */ +dict_table_add_system_columns( +/*==========================*/ + dict_table_t* table, /* in/out: table */ + mem_heap_t* heap) /* in: temporary heap */ { - ulint fold; - ulint id_fold; - ulint i; - ulint row_len; - ut_ad(table); -#ifdef UNIV_SYNC_DEBUG - ut_ad(mutex_own(&(dict_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ ut_ad(table->n_def == table->n_cols - DATA_N_SYS_COLS); ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); - ut_ad(table->cached == FALSE); - - fold = ut_fold_string(table->name); - id_fold = ut_fold_dulint(table->id); - - table->cached = TRUE; + ut_ad(!table->cached); /* NOTE: the system columns MUST be added in the following order (so that they can be indexed by the numerical value of DATA_ROW_ID, @@ -872,19 +808,19 @@ dict_table_add_to_cache( The clustered index will not always physically contain all system columns. */ - dict_mem_table_add_col(table, "DB_ROW_ID", DATA_SYS, + dict_mem_table_add_col(table, heap, "DB_ROW_ID", DATA_SYS, DATA_ROW_ID | DATA_NOT_NULL, DATA_ROW_ID_LEN); #if DATA_ROW_ID != 0 #error "DATA_ROW_ID != 0" #endif - dict_mem_table_add_col(table, "DB_TRX_ID", DATA_SYS, + dict_mem_table_add_col(table, heap, "DB_TRX_ID", DATA_SYS, DATA_TRX_ID | DATA_NOT_NULL, DATA_TRX_ID_LEN); #if DATA_TRX_ID != 1 #error "DATA_TRX_ID != 1" #endif - dict_mem_table_add_col(table, "DB_ROLL_PTR", DATA_SYS, + dict_mem_table_add_col(table, heap, "DB_ROLL_PTR", DATA_SYS, DATA_ROLL_PTR | DATA_NOT_NULL, DATA_ROLL_PTR_LEN); #if DATA_ROLL_PTR != 2 @@ -896,10 +832,34 @@ dict_table_add_to_cache( #if DATA_N_SYS_COLS != 3 #error "DATA_N_SYS_COLS != 3" #endif +} + +/************************************************************************** +Adds a table object to the dictionary cache. 
*/ + +void +dict_table_add_to_cache( +/*====================*/ + dict_table_t* table, /* in: table */ + mem_heap_t* heap) /* in: temporary heap */ +{ + ulint fold; + ulint id_fold; + ulint i; + ulint row_len; /* The lower limit for what we consider a "big" row */ #define BIG_ROW_SIZE 1024 + ut_ad(mutex_own(&(dict_sys->mutex))); + + dict_table_add_system_columns(table, heap); + + table->cached = TRUE; + + fold = ut_fold_string(table->name); + id_fold = ut_fold_dulint(table->id); + row_len = 0; for (i = 0; i < table->n_def; i++) { ulint col_len = dict_col_get_max_size( @@ -1003,9 +963,7 @@ dict_table_rename_in_cache( ibool success; ut_ad(table); -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(dict_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ old_size = mem_heap_get_size(table->heap); @@ -1209,9 +1167,7 @@ dict_table_change_id_in_cache( dulint new_id) /* in: new id to set */ { ut_ad(table); -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(dict_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); /* Remove the table from the hash table of id's */ @@ -1238,9 +1194,7 @@ dict_table_remove_from_cache( ulint size; ut_ad(table); -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(dict_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); #if 0 @@ -1354,9 +1308,7 @@ dict_index_add_to_cache( ulint i; ut_ad(index); -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(dict_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ ut_ad(index->n_def == index->n_fields); ut_ad(index->magic_n == DICT_INDEX_MAGIC_N); @@ -1415,7 +1367,7 @@ dict_index_add_to_cache( dict_index_get_nth_field(new_index, i)->col->ord_part = 1; } - new_index->page = page_no; + new_index->page = (unsigned int) page_no; rw_lock_create(&new_index->lock, SYNC_INDEX_TREE); if (!UNIV_UNLIKELY(new_index->type & DICT_UNIVERSAL)) { @@ -1452,9 +1404,7 @@ dict_index_remove_from_cache( ut_ad(table && index); ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); ut_ad(index->magic_n == DICT_INDEX_MAGIC_N); -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(dict_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ rw_lock_free(&index->lock); @@ -1484,9 +1434,7 @@ dict_index_find_cols( ut_ad(table && index); ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(dict_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ for (i = 0; i < index->n_fields; i++) { ulint j; @@ -1531,10 +1479,10 @@ dict_index_add_col( field = dict_index_get_nth_field(index, index->n_def - 1); field->col = col; - field->fixed_len = dict_col_get_fixed_size(col); + field->fixed_len = (unsigned int) dict_col_get_fixed_size(col); if (prefix_len && field->fixed_len > prefix_len) { - field->fixed_len = prefix_len; + field->fixed_len = (unsigned int) prefix_len; } /* Long fixed-length fields that need external storage are treated as @@ -1544,6 +1492,12 @@ dict_index_add_col( if (field->fixed_len > DICT_MAX_INDEX_COL_LEN) { field->fixed_len = 0; } +#if DICT_MAX_INDEX_COL_LEN != 768 + /* The comparison limit above must be constant. If it were + changed, the disk format of some fixed-length columns would + change, which would be a disaster. 
*/ +# error "DICT_MAX_INDEX_COL_LEN != 768" +#endif if (!(col->prtype & DATA_NOT_NULL)) { index->n_nullable++; @@ -1600,9 +1554,6 @@ dict_index_copy_types( ifield = dict_index_get_nth_field(index, i); dfield_type = dfield_get_type(dtuple_get_nth_field(tuple, i)); dict_col_copy_type(dict_field_get_col(ifield), dfield_type); - if (UNIV_UNLIKELY(ifield->prefix_len)) { - dfield_type->len = ifield->prefix_len; - } } } @@ -1648,9 +1599,7 @@ dict_index_build_internal_clust( ut_ad(table && index); ut_ad(index->type & DICT_CLUSTERED); -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(dict_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); /* Create a new index object with certainly enough fields */ @@ -1736,7 +1685,7 @@ dict_index_build_internal_clust( break; } - new_index->trx_id_offset += fixed_size; + new_index->trx_id_offset += (unsigned int) fixed_size; } } @@ -1803,9 +1752,7 @@ dict_index_build_internal_non_clust( ut_ad(table && index); ut_ad(0 == (index->type & DICT_CLUSTERED)); -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(dict_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); /* The clustered index should be the first in the list of indexes */ @@ -1918,9 +1865,7 @@ dict_foreign_remove_from_cache( /*===========================*/ dict_foreign_t* foreign) /* in, own: foreign constraint */ { -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(dict_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ ut_a(foreign); if (foreign->referenced_table) { @@ -1951,9 +1896,7 @@ dict_foreign_find( { dict_foreign_t* foreign; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(dict_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ foreign = UT_LIST_GET_FIRST(table->foreign_list); @@ -1994,9 +1937,12 @@ dict_foreign_find_index( ulint n_cols, /* in: number of columns */ dict_index_t* types_idx, /* in: NULL or an index to whose types the column types must match */ - ibool check_charsets) + ibool check_charsets, /* in: whether to check charsets. 
only has an effect if types_idx != NULL */ + ulint check_null) + /* in: nonzero if none of the columns must + be declared NOT NULL */ { dict_index_t* index; dict_field_t* field; @@ -2026,6 +1972,12 @@ dict_foreign_find_index( break; } + if (check_null + && (field->col->prtype & DATA_NOT_NULL)) { + + return(NULL); + } + if (types_idx && !cmp_cols_are_equal( dict_index_get_nth_col(index, i), dict_index_get_nth_col(types_idx, @@ -2113,9 +2065,7 @@ dict_foreign_add_to_cache( ibool added_to_referenced_list= FALSE; FILE* ef = dict_foreign_err_file; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(dict_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ for_table = dict_table_check_if_in_cache_low( foreign->foreign_table_name); @@ -2144,7 +2094,7 @@ dict_foreign_add_to_cache( ref_table, (const char**) for_in_cache->referenced_col_names, for_in_cache->n_fields, for_in_cache->foreign_index, - check_charsets); + check_charsets, FALSE); if (index == NULL) { dict_foreign_error_report( @@ -2176,7 +2126,10 @@ dict_foreign_add_to_cache( for_table, (const char**) for_in_cache->foreign_col_names, for_in_cache->n_fields, - for_in_cache->referenced_index, check_charsets); + for_in_cache->referenced_index, check_charsets, + for_in_cache->type + & (DICT_FOREIGN_ON_DELETE_SET_NULL + | DICT_FOREIGN_ON_UPDATE_SET_NULL)); if (index == NULL) { dict_foreign_error_report( @@ -2186,7 +2139,9 @@ dict_foreign_add_to_cache( "the columns as the first columns," " or the data types in the\n" "table do not match" - " the ones in the referenced table."); + " the ones in the referenced table\n" + "or one of the ON ... SET NULL columns" + " is declared NOT NULL."); if (for_in_cache == foreign) { if (added_to_referenced_list) { @@ -2794,9 +2749,7 @@ dict_create_foreign_constraints_low( const char* column_names[500]; const char* referenced_table_name; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(dict_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ table = dict_table_get_low(name); @@ -2994,7 +2947,8 @@ col_loop1: /* Try to find an index which contains the columns as the first fields and in the right order */ - index = dict_foreign_find_index(table, column_names, i, NULL, TRUE); + index = dict_foreign_find_index(table, column_names, i, + NULL, TRUE, FALSE); if (!index) { mutex_enter(&dict_foreign_err_mutex); @@ -3045,7 +2999,7 @@ col_loop1: foreign->foreign_table_name = mem_heap_strdup(foreign->heap, table->name); foreign->foreign_index = index; - foreign->n_fields = i; + foreign->n_fields = (unsigned int) i; foreign->foreign_col_names = mem_heap_alloc(foreign->heap, i * sizeof(void*)); for (i = 0; i < foreign->n_fields; i++) { @@ -3265,7 +3219,8 @@ try_find_index: if (referenced_table) { index = dict_foreign_find_index(referenced_table, column_names, i, - foreign->foreign_index, TRUE); + foreign->foreign_index, + TRUE, FALSE); if (!index) { dict_foreign_free(foreign); mutex_enter(&dict_foreign_err_mutex); @@ -3372,7 +3327,8 @@ dict_create_foreign_constraints( ulint err; mem_heap_t* heap; - ut_a(trx && trx->mysql_thd); + ut_a(trx); + ut_a(trx->mysql_thd); str = dict_strip_comments(sql_string); heap = mem_heap_create(10000); @@ -3414,7 +3370,8 @@ dict_foreign_parse_drop_constraints( FILE* ef = dict_foreign_err_file; struct charset_info_st* cs; - ut_a(trx && trx->mysql_thd); + ut_a(trx); + ut_a(trx->mysql_thd); cs = innobase_get_charset(trx->mysql_thd); @@ -3425,9 +3382,7 @@ dict_foreign_parse_drop_constraints( str = dict_strip_comments(*(trx->mysql_query_str)); ptr = str; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(dict_sys->mutex))); -#endif /* 
UNIV_SYNC_DEBUG */ loop: ptr = dict_scan_to(ptr, "DROP"); @@ -3725,7 +3680,7 @@ dict_index_calc_min_rec_len( } /* round the NULL flags up to full bytes */ - sum += (nullable + 7) / 8; + sum += UT_BITS_IN_BYTES(nullable); return(sum); } @@ -3864,9 +3819,7 @@ dict_foreign_print_low( { ulint i; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(dict_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ fprintf(stderr, " FOREIGN KEY CONSTRAINT %s: %s (", foreign->id, foreign->foreign_table_name); @@ -3931,9 +3884,7 @@ dict_table_print_low( dict_foreign_t* foreign; ulint i; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(dict_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ dict_update_statistics_low(table, TRUE); @@ -3989,9 +3940,7 @@ dict_col_print_low( { dtype_t type; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(dict_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ dict_col_copy_type(col, &type); fprintf(stderr, "%s: ", dict_table_get_col_name(table, @@ -4011,9 +3960,7 @@ dict_index_print_low( ib_longlong n_vals; ulint i; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(dict_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ if (index->n_user_defined_cols > 0) { n_vals = index->stat_n_diff_key_vals[ @@ -4061,9 +4008,8 @@ dict_field_print_low( /*=================*/ dict_field_t* field) /* in: field */ { -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(dict_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ + fprintf(stderr, " %s", field->name); if (field->prefix_len != 0) { diff --git a/storage/innobase/dict/dict0load.c b/storage/innobase/dict/dict0load.c index 518e32ec4dc..1ff1fd54cec 100644 --- a/storage/innobase/dict/dict0load.c +++ b/storage/innobase/dict/dict0load.c @@ -67,9 +67,7 @@ dict_get_first_table_name_in_db( ulint len; mtr_t mtr; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(dict_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ heap = mem_heap_create(1000); @@ -353,9 +351,7 @@ dict_load_columns( ulint i; mtr_t mtr; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(dict_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ mtr_start(&mtr); @@ -427,7 +423,8 @@ dict_load_columns( ut_a(name_of_col_is(sys_columns, sys_index, 8, "PREC")); - dict_mem_table_add_col(table, name, mtype, prtype, col_len); + dict_mem_table_add_col(table, heap, name, + mtype, prtype, col_len); btr_pcur_move_to_next_user_rec(&pcur, &mtr); } @@ -478,11 +475,7 @@ dict_load_fields( ulint i; mtr_t mtr; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(dict_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ - - UT_NOT_USED(table); mtr_start(&mtr); @@ -586,9 +579,7 @@ dict_load_indexes( dulint id; mtr_t mtr; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(dict_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ if ((ut_dulint_get_high(table->id) == 0) && (ut_dulint_get_low(table->id) < DICT_HDR_FIRST_ID)) { @@ -754,11 +745,9 @@ dict_load_table( ulint err; mtr_t mtr; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(dict_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ - heap = mem_heap_create(1000); + heap = mem_heap_create(32000); mtr_start(&mtr); @@ -843,7 +832,7 @@ err_exit: table = dict_mem_table_create(name, space, n_cols & ~0x80000000UL, flags); - table->ibd_file_missing = ibd_file_missing; + table->ibd_file_missing = (unsigned int) ibd_file_missing; ut_a(name_of_col_is(sys_tables, sys_index, 3, "ID")); @@ -864,7 +853,9 @@ err_exit: dict_load_columns(table, heap); - dict_table_add_to_cache(table); + dict_table_add_to_cache(table, heap); + + mem_heap_empty(heap); dict_load_indexes(table, heap); @@ -920,9 +911,7 @@ dict_load_table_on_id( dict_table_t* table; mtr_t mtr; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(dict_sys->mutex))); 
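(The dict_index_calc_min_rec_len() hunk above, like the XDES_SIZE hunk in fsp0fsp.c further below, replaces an open-coded "(bits + 7) / 8" with the new UT_BITS_IN_BYTES() macro. The standalone C sketch below illustrates the intended round-up-to-whole-bytes behaviour; the macro expansion shown is an assumption for illustration, as the real definition lives in InnoDB's utility headers and is not part of this diff.)

    #include <assert.h>

    /* Assumed expansion, for illustration only; the actual macro is
    defined in InnoDB's utility headers, outside this patch. */
    #define UT_BITS_IN_BYTES(b) (((b) + 7) / 8)

    int main(void)
    {
        /* The NULL flags of 1..8 nullable columns fit in one byte,
        9..16 in two, and so on, exactly like the old
        (nullable + 7) / 8 expression. */
        assert(UT_BITS_IN_BYTES(0) == 0);
        assert(UT_BITS_IN_BYTES(1) == 1);
        assert(UT_BITS_IN_BYTES(8) == 1);
        assert(UT_BITS_IN_BYTES(9) == 2);
        return 0;
    }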
-#endif /* UNIV_SYNC_DEBUG */ /* NOTE that the operation of this function is protected by the dictionary mutex, and therefore no deadlocks can occur @@ -1003,9 +992,7 @@ dict_load_sys_table( { mem_heap_t* heap; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(dict_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ heap = mem_heap_create(1000); @@ -1035,9 +1022,7 @@ dict_load_foreign_cols( ulint i; mtr_t mtr; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(dict_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ foreign->foreign_col_names = mem_heap_alloc( foreign->heap, foreign->n_fields * sizeof(void*)); @@ -1110,11 +1095,10 @@ dict_load_foreign( rec_t* rec; byte* field; ulint len; + ulint n_fields_and_type; mtr_t mtr; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(dict_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ heap2 = mem_heap_create(1000); @@ -1172,15 +1156,15 @@ dict_load_foreign( foreign = dict_mem_foreign_create(); - foreign->n_fields = mach_read_from_4( + n_fields_and_type = mach_read_from_4( rec_get_nth_field_old(rec, 5, &len)); ut_a(len == 4); - /* We store the type to the bits 24-31 of n_fields */ + /* We store the type in the bits 24..29 of n_fields_and_type. */ - foreign->type = foreign->n_fields >> 24; - foreign->n_fields = foreign->n_fields & 0xFFFFFFUL; + foreign->type = (unsigned int) (n_fields_and_type >> 24); + foreign->n_fields = (unsigned int) (n_fields_and_type & 0x3FFUL); foreign->id = mem_heap_strdup(foreign->heap, id); @@ -1242,9 +1226,7 @@ dict_load_foreigns( ulint err; mtr_t mtr; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(dict_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ sys_foreign = dict_table_get_low("SYS_FOREIGN"); diff --git a/storage/innobase/dict/dict0mem.c b/storage/innobase/dict/dict0mem.c index cee0ffec20b..47cf7a0bc9c 100644 --- a/storage/innobase/dict/dict0mem.c +++ b/storage/innobase/dict/dict0mem.c @@ -50,14 +50,14 @@ dict_mem_table_create( table->heap = heap; - table->flags = flags; + table->flags = (unsigned int) flags; table->name = mem_heap_strdup(heap, name); table->dir_path_of_temp_table = NULL; - table->space = space; + table->space = (unsigned int) space; table->ibd_file_missing = FALSE; table->tablespace_discarded = FALSE; table->n_def = 0; - table->n_cols = n_cols + DATA_N_SYS_COLS; + table->n_cols = (unsigned int) (n_cols + DATA_N_SYS_COLS); table->n_mysql_handles_opened = 0; table->n_foreign_key_checks_running = 0; @@ -90,6 +90,15 @@ dict_mem_table_create( mutex_create(&table->autoinc_mutex, SYNC_DICT_AUTOINC_MUTEX); table->autoinc_inited = FALSE; + + /* The actual increment value will be set by MySQL, we simply + default to 1 here.*/ + table->autoinc_increment = 1; + + /* The number of transactions that are either waiting on the + AUTOINC lock or have been granted the lock. */ + table->n_waiting_or_granted_auto_inc_locks = 0; + #ifdef UNIV_DEBUG table->magic_n = DICT_TABLE_MAGIC_N; #endif /* UNIV_DEBUG */ @@ -108,18 +117,11 @@ dict_mem_table_free( ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); mutex_free(&(table->autoinc_mutex)); - - if (table->col_names && (table->n_def < table->n_cols)) { - ut_free((void*)table->col_names); - } - mem_heap_free(table->heap); } /******************************************************************** -Add 'name' to end of the col_names array (see dict_table_t::col_names). Call -ut_free on col_names (if not NULL), allocate new array (if heap, from it, -otherwise with ut_malloc), and copy col_names + name to it. */ +Append 'name' to 'col_names' (@see dict_table_t::col_names). 
*/ static const char* dict_add_col_name( @@ -129,21 +131,19 @@ dict_add_col_name( NULL */ ulint cols, /* in: number of existing columns */ const char* name, /* in: new column name */ - mem_heap_t* heap) /* in: heap, or NULL */ + mem_heap_t* heap) /* in: heap */ { - ulint i; - ulint old_len; - ulint new_len; - ulint total_len; - const char* s; - char* res; + ulint old_len; + ulint new_len; + ulint total_len; + char* res; - ut_a(((cols == 0) && !col_names) || ((cols > 0) && col_names)); - ut_a(*name); + ut_ad(!cols == !col_names); /* Find out length of existing array. */ if (col_names) { - s = col_names; + const char* s = col_names; + ulint i; for (i = 0; i < cols; i++) { s += strlen(s) + 1; @@ -157,11 +157,7 @@ dict_add_col_name( new_len = strlen(name) + 1; total_len = old_len + new_len; - if (heap) { - res = mem_heap_alloc(heap, total_len); - } else { - res = ut_malloc(total_len); - } + res = mem_heap_alloc(heap, total_len); if (old_len > 0) { memcpy(res, col_names, old_len); @@ -169,10 +165,6 @@ dict_add_col_name( memcpy(res + old_len, name, new_len); - if (col_names) { - ut_free((char*)col_names); - } - return(res); } @@ -183,7 +175,8 @@ void dict_mem_table_add_col( /*===================*/ dict_table_t* table, /* in: table */ - const char* name, /* in: column name */ + mem_heap_t* heap, /* in: temporary memory heap, or NULL */ + const char* name, /* in: column name, or NULL */ ulint mtype, /* in: main datatype */ ulint prtype, /* in: precise type */ ulint len) /* in: precision */ @@ -191,31 +184,42 @@ dict_mem_table_add_col( dict_col_t* col; ulint mbminlen; ulint mbmaxlen; - mem_heap_t* heap; + ulint i; - ut_ad(table && name); + ut_ad(table); ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); + ut_ad(!heap == !name); - table->n_def++; + i = table->n_def++; - heap = table->n_def < table->n_cols ? NULL : table->heap; - table->col_names = dict_add_col_name(table->col_names, - table->n_def - 1, - name, heap); + if (name) { + if (UNIV_UNLIKELY(table->n_def == table->n_cols)) { + heap = table->heap; + } + if (UNIV_LIKELY(i) && UNIV_UNLIKELY(!table->col_names)) { + /* All preceding column names are empty. 
*/ + char* s = mem_heap_alloc(heap, table->n_def); + memset(s, 0, table->n_def); + table->col_names = s; + } - col = (dict_col_t*) dict_table_get_nth_col(table, table->n_def - 1); + table->col_names = dict_add_col_name(table->col_names, + i, name, heap); + } - col->ind = table->n_def - 1; + col = (dict_col_t*) dict_table_get_nth_col(table, i); + + col->ind = (unsigned int) i; col->ord_part = 0; - col->mtype = mtype; - col->prtype = prtype; - col->len = len; + col->mtype = (unsigned int) mtype; + col->prtype = (unsigned int) prtype; + col->len = (unsigned int) len; dtype_get_mblen(mtype, prtype, &mbminlen, &mbmaxlen); - col->mbminlen = mbminlen; - col->mbmaxlen = mbmaxlen; + col->mbminlen = (unsigned int) mbminlen; + col->mbmaxlen = (unsigned int) mbmaxlen; } /************************************************************************** @@ -245,13 +249,13 @@ dict_mem_index_create( index->heap = heap; index->type = type; - index->space = space; + index->space = (unsigned int) space; index->page = 0; index->name = mem_heap_strdup(heap, index_name); index->table_name = table_name; index->table = NULL; index->n_def = index->n_nullable = 0; - index->n_fields = n_fields; + index->n_fields = (unsigned int) n_fields; index->fields = mem_heap_alloc(heap, 1 + n_fields * sizeof(dict_field_t)); /* The '1 +' above prevents allocation @@ -318,7 +322,7 @@ dict_mem_index_add_field( { dict_field_t* field; - ut_ad(index && name); + ut_ad(index); ut_ad(index->magic_n == DICT_INDEX_MAGIC_N); index->n_def++; @@ -326,7 +330,7 @@ dict_mem_index_add_field( field = dict_index_get_nth_field(index, index->n_def - 1); field->name = name; - field->prefix_len = prefix_len; + field->prefix_len = (unsigned int) prefix_len; } /************************************************************************** diff --git a/storage/innobase/dyn/Makefile.am b/storage/innobase/dyn/Makefile.am deleted file mode 100644 index 57d9a25e481..00000000000 --- a/storage/innobase/dyn/Makefile.am +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (C) 2001, 2003 MySQL AB & Innobase Oy -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -include ../include/Makefile.i - -noinst_LIBRARIES = libdyn.a - -libdyn_a_SOURCES = dyn0dyn.c - -EXTRA_PROGRAMS = - -# Don't update the files from bitkeeper -%::SCCS/s.% diff --git a/storage/innobase/eval/Makefile.am b/storage/innobase/eval/Makefile.am deleted file mode 100644 index 6c2b05d8b7a..00000000000 --- a/storage/innobase/eval/Makefile.am +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (C) 2001, 2003 MySQL AB & Innobase Oy -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -include ../include/Makefile.i - -noinst_LIBRARIES = libeval.a - -libeval_a_SOURCES = eval0eval.c eval0proc.c - -EXTRA_PROGRAMS = - -# Don't update the files from bitkeeper -%::SCCS/s.% diff --git a/storage/innobase/eval/eval0proc.c b/storage/innobase/eval/eval0proc.c index f5a9d9dc2a8..a513e8e4024 100644 --- a/storage/innobase/eval/eval0proc.c +++ b/storage/innobase/eval/eval0proc.c @@ -194,7 +194,7 @@ for_step( loop_var_value = eval_node_get_int_val(node->loop_start_limit); node->loop_end_value - = eval_node_get_int_val(node->loop_end_limit); + = (int) eval_node_get_int_val(node->loop_end_limit); } /* Check if we should do another loop */ diff --git a/storage/innobase/fil/Makefile.am b/storage/innobase/fil/Makefile.am deleted file mode 100644 index 0a85ceb5b86..00000000000 --- a/storage/innobase/fil/Makefile.am +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (C) 2001, 2003 MySQL AB & Innobase Oy -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -include ../include/Makefile.i - -noinst_LIBRARIES = libfil.a - -libfil_a_SOURCES = fil0fil.c - -EXTRA_PROGRAMS = - -# Don't update the files from bitkeeper -%::SCCS/s.% diff --git a/storage/innobase/fil/fil0fil.c b/storage/innobase/fil/fil0fil.c index 883fbd09ee4..c63d67cae60 100644 --- a/storage/innobase/fil/fil0fil.c +++ b/storage/innobase/fil/fil0fil.c @@ -409,9 +409,7 @@ fil_space_is_flushed( { fil_node_t* node; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(fil_system->mutex))); -#endif /* UNIV_SYNC_DEBUG */ node = UT_LIST_GET_FIRST(space->chain); @@ -514,9 +512,7 @@ fil_node_open_file( ulint space_id; #endif /* !UNIV_HOTBACKUP */ -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(system->mutex))); -#endif /* UNIV_SYNC_DEBUG */ ut_a(node->n_pending == 0); ut_a(node->open == FALSE); @@ -660,9 +656,7 @@ fil_node_close_file( ibool ret; ut_ad(node && system); -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(system->mutex))); -#endif /* UNIV_SYNC_DEBUG */ ut_a(node->open); ut_a(node->n_pending == 0); ut_a(node->n_pending_flushes == 0); @@ -705,9 +699,8 @@ fil_try_to_close_file_in_LRU( fil_system_t* system = fil_system; fil_node_t* node; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(system->mutex))); -#endif /* UNIV_SYNC_DEBUG */ + node = UT_LIST_GET_LAST(system->LRU); if (print_info) { @@ -765,9 +758,7 @@ fil_mutex_enter_and_prepare_for_io( ulint count = 0; ulint count2 = 0; -#ifdef UNIV_SYNC_DEBUG ut_ad(!mutex_own(&(system->mutex))); -#endif /* UNIV_SYNC_DEBUG */ retry: mutex_enter(&(system->mutex)); @@ -881,9 +872,7 @@ fil_node_free( fil_space_t* space) /* in: space where the file node is chained */ { ut_ad(node && system && space); -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(system->mutex))); -#endif /* 
UNIV_SYNC_DEBUG */ ut_a(node->magic_n == FIL_NODE_MAGIC_N); ut_a(node->n_pending == 0); @@ -3870,9 +3859,7 @@ fil_node_prepare_for_io( fil_space_t* space) /* in: space */ { ut_ad(node && system && space); -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(system->mutex))); -#endif /* UNIV_SYNC_DEBUG */ if (system->n_open > system->max_n_open + 5) { ut_print_timestamp(stderr); @@ -3917,9 +3904,7 @@ fil_node_complete_io( { ut_ad(node); ut_ad(system); -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(system->mutex))); -#endif /* UNIV_SYNC_DEBUG */ ut_a(node->n_pending > 0); diff --git a/storage/innobase/fsp/Makefile.am b/storage/innobase/fsp/Makefile.am deleted file mode 100644 index 7818cdafc1b..00000000000 --- a/storage/innobase/fsp/Makefile.am +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright (C) 2001, 2003 MySQL AB & Innobase Oy -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - - -include ../include/Makefile.i - -noinst_LIBRARIES = libfsp.a - -libfsp_a_SOURCES = fsp0fsp.c - -EXTRA_PROGRAMS = - -# Don't update the files from bitkeeper -%::SCCS/s.% diff --git a/storage/innobase/fsp/fsp0fsp.c b/storage/innobase/fsp/fsp0fsp.c index 00c5e582b3e..e1074933fe8 100644 --- a/storage/innobase/fsp/fsp0fsp.c +++ b/storage/innobase/fsp/fsp0fsp.c @@ -205,10 +205,9 @@ the extent are free and which contain old tuple version to clean. */ space */ #define XDES_FSEG 4 /* extent belongs to a segment */ -/* File extent data structure size in bytes. The "+ 7 ) / 8" part in the -definition rounds the number of bytes upward. */ +/* File extent data structure size in bytes. 
*/ #define XDES_SIZE \ - (XDES_BITMAP + (FSP_EXTENT_SIZE * XDES_BITS_PER_PAGE + 7) / 8) + (XDES_BITMAP + UT_BITS_IN_BYTES(FSP_EXTENT_SIZE * XDES_BITS_PER_PAGE)) /* Offset of the descriptor array on a descriptor page */ #define XDES_ARR_OFFSET (FSP_HEADER_OFFSET + FSP_HEADER_SIZE) @@ -2045,11 +2044,9 @@ fseg_create_general( mtr); } -#ifdef UNIV_SYNC_DEBUG ut_ad(!mutex_own(&kernel_mutex) || mtr_memo_contains(mtr, fil_space_get_latch(space), MTR_MEMO_X_LOCK)); -#endif /* UNIV_SYNC_DEBUG */ latch = fil_space_get_latch(space); mtr_x_lock(latch, mtr); @@ -2205,11 +2202,10 @@ fseg_n_reserved_pages( space = buf_frame_get_space_id(header); -#ifdef UNIV_SYNC_DEBUG ut_ad(!mutex_own(&kernel_mutex) || mtr_memo_contains(mtr, fil_space_get_latch(space), MTR_MEMO_X_LOCK)); -#endif /* UNIV_SYNC_DEBUG */ + mtr_x_lock(fil_space_get_latch(space), mtr); inode = fseg_inode_get(header, mtr); @@ -2601,11 +2597,9 @@ fseg_alloc_free_page_general( space = buf_frame_get_space_id(seg_header); -#ifdef UNIV_SYNC_DEBUG ut_ad(!mutex_own(&kernel_mutex) || mtr_memo_contains(mtr, fil_space_get_latch(space), MTR_MEMO_X_LOCK)); -#endif /* UNIV_SYNC_DEBUG */ latch = fil_space_get_latch(space); mtr_x_lock(latch, mtr); @@ -2751,11 +2745,9 @@ fsp_reserve_free_extents( ulint n_pages_added; ut_ad(mtr); -#ifdef UNIV_SYNC_DEBUG ut_ad(!mutex_own(&kernel_mutex) || mtr_memo_contains(mtr, fil_space_get_latch(space), MTR_MEMO_X_LOCK)); -#endif /* UNIV_SYNC_DEBUG */ *n_reserved = n_ext; latch = fil_space_get_latch(space); @@ -2837,7 +2829,7 @@ will be able to insert new data to the database without running out the tablespace. Only free extents are taken into account and we also subtract the safety margin required by the above function fsp_reserve_free_extents. */ -ulint +ullint fsp_get_available_space_in_free_extents( /*====================================*/ /* out: available space in kB */ @@ -2853,9 +2845,8 @@ fsp_get_available_space_in_free_extents( rw_lock_t* latch; mtr_t mtr; -#ifdef UNIV_SYNC_DEBUG ut_ad(!mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ + mtr_start(&mtr); latch = fil_space_get_latch(space); @@ -2904,7 +2895,8 @@ fsp_get_available_space_in_free_extents( return(0); } - return(((n_free - reserve) * FSP_EXTENT_SIZE) + return((ullint)(n_free - reserve) + * FSP_EXTENT_SIZE * (UNIV_PAGE_SIZE / 1024)); } @@ -3113,11 +3105,10 @@ fseg_free_page( { fseg_inode_t* seg_inode; -#ifdef UNIV_SYNC_DEBUG ut_ad(!mutex_own(&kernel_mutex) || mtr_memo_contains(mtr, fil_space_get_latch(space), MTR_MEMO_X_LOCK)); -#endif /* UNIV_SYNC_DEBUG */ + mtr_x_lock(fil_space_get_latch(space), mtr); seg_inode = fseg_inode_get(seg_header, mtr); @@ -3222,11 +3213,10 @@ fseg_free_step( space = buf_frame_get_space_id(header); -#ifdef UNIV_SYNC_DEBUG ut_ad(!mutex_own(&kernel_mutex) || mtr_memo_contains(mtr, fil_space_get_latch(space), MTR_MEMO_X_LOCK)); -#endif /* UNIV_SYNC_DEBUG */ + mtr_x_lock(fil_space_get_latch(space), mtr); descr = xdes_get_descriptor(space, buf_frame_get_page_no(header), mtr); @@ -3297,11 +3287,10 @@ fseg_free_step_not_header( space = buf_frame_get_space_id(header); -#ifdef UNIV_SYNC_DEBUG ut_ad(!mutex_own(&kernel_mutex) || mtr_memo_contains(mtr, fil_space_get_latch(space), MTR_MEMO_X_LOCK)); -#endif /* UNIV_SYNC_DEBUG */ + mtr_x_lock(fil_space_get_latch(space), mtr); inode = fseg_inode_get(header, mtr); @@ -3660,7 +3649,11 @@ fsp_validate( n_full_frag_pages = FSP_EXTENT_SIZE * flst_get_len(header + FSP_FULL_FRAG, &mtr); - ut_a(free_limit <= size || (space != 0 && size < FSP_EXTENT_SIZE)); + if (UNIV_UNLIKELY(free_limit > 
size)) { + + ut_a(space != 0); + ut_a(size < FSP_EXTENT_SIZE); + } flst_validate(header + FSP_FREE, &mtr); flst_validate(header + FSP_FREE_FRAG, &mtr); diff --git a/storage/innobase/fut/Makefile.am b/storage/innobase/fut/Makefile.am deleted file mode 100644 index ffe9835a023..00000000000 --- a/storage/innobase/fut/Makefile.am +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (C) 2001, 2003 MySQL AB & Innobase Oy -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -include ../include/Makefile.i - -noinst_LIBRARIES = libfut.a - -libfut_a_SOURCES = fut0fut.c fut0lst.c - -EXTRA_PROGRAMS = - -# Don't update the files from bitkeeper -%::SCCS/s.% diff --git a/storage/innobase/ha/Makefile.am b/storage/innobase/ha/Makefile.am deleted file mode 100644 index 696cad0b203..00000000000 --- a/storage/innobase/ha/Makefile.am +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (C) 2001, 2003 MySQL AB & Innobase Oy -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -include ../include/Makefile.i - -noinst_LIBRARIES = libha.a - -libha_a_SOURCES = ha0ha.c hash0hash.c - -EXTRA_PROGRAMS = - -# Don't update the files from bitkeeper -%::SCCS/s.% diff --git a/storage/innobase/ha/ha0ha.c b/storage/innobase/ha/ha0ha.c index 07dfb66afa8..7f241140050 100644 --- a/storage/innobase/ha/ha0ha.c +++ b/storage/innobase/ha/ha0ha.c @@ -96,9 +96,8 @@ ha_insert_for_fold( ulint hash; ut_ad(table && data); -#ifdef UNIV_SYNC_DEBUG ut_ad(!table->mutexes || mutex_own(hash_get_mutex(table, fold))); -#endif /* UNIV_SYNC_DEBUG */ + hash = hash_calc_hash(fold, table); cell = hash_get_nth_cell(table, hash); @@ -194,9 +193,8 @@ ha_delete( { ha_node_t* node; -#ifdef UNIV_SYNC_DEBUG ut_ad(!table->mutexes || mutex_own(hash_get_mutex(table, fold))); -#endif /* UNIV_SYNC_DEBUG */ + node = ha_search_with_data(table, fold, data); ut_a(node); @@ -218,9 +216,7 @@ ha_search_and_update_if_found( { ha_node_t* node; -#ifdef UNIV_SYNC_DEBUG ut_ad(!table->mutexes || mutex_own(hash_get_mutex(table, fold))); -#endif /* UNIV_SYNC_DEBUG */ node = ha_search_with_data(table, fold, data); @@ -248,9 +244,8 @@ ha_remove_all_nodes_to_page( { ha_node_t* node; -#ifdef UNIV_SYNC_DEBUG ut_ad(!table->mutexes || mutex_own(hash_get_mutex(table, fold))); -#endif /* UNIV_SYNC_DEBUG */ + node = ha_chain_get_first(table, fold); while (node) { diff --git a/storage/innobase/handler/Makefile.am b/storage/innobase/handler/Makefile.am deleted file mode 100644 index 0d34212bdd4..00000000000 --- a/storage/innobase/handler/Makefile.am +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB -# & Innobase Oy -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -include ../include/Makefile.i - -DEFS = -DMYSQL_SERVER @DEFS@ - -noinst_LIBRARIES = libhandler.a - -libhandler_a_SOURCES = ha_innodb.cc - -EXTRA_PROGRAMS = - -# Don't update the files from bitkeeper -%::SCCS/s.% diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index ee74fce322e..ce9dcb20e7d 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -15,11 +15,10 @@ /* This file defines the InnoDB handler: the interface between MySQL and InnoDB NOTE: You can only use noninlined InnoDB functions in this file, because we -have disables the InnoDB inlining in this file. */ +have disabled the InnoDB inlining in this file. 
*/ /* TODO list for the InnoDB handler in 5.0: - - Remove the flag trx->active_trans and look at the InnoDB - trx struct state field + - Remove the flag trx->active_trans and look at trx->conc_state - fix savepoint functions to use savepoint storage area - Find out what kind of problems the OS X case-insensitivity causes to table and database names; should we 'normalize' the names like we do @@ -31,8 +30,7 @@ have disables the InnoDB inlining in this file. */ #endif #include <mysql_priv.h> - -#ifdef WITH_INNOBASE_STORAGE_ENGINE +#include <mysqld_error.h> #include <m_ctype.h> #include <hash.h> @@ -40,74 +38,29 @@ have disables the InnoDB inlining in this file. */ #include <mysys_err.h> #include <my_sys.h> #include "ha_innodb.h" - -pthread_mutex_t innobase_share_mutex, /* to protect innobase_open_files */ - prepare_commit_mutex; /* to force correct commit order in - binlog */ -ulong commit_threads= 0; -pthread_mutex_t commit_threads_m; -pthread_cond_t commit_cond; -pthread_mutex_t commit_cond_m; -bool innodb_inited= 0; +#include <mysql/plugin.h> + +#ifndef MYSQL_SERVER +/* This is needed because of Bug #3596. Let us hope that pthread_mutex_t +is defined the same in both builds: the MySQL server and the InnoDB plugin. */ +extern pthread_mutex_t LOCK_thread_count; +#endif /* MYSQL_SERVER */ + +/** to protect innobase_open_files */ +static pthread_mutex_t innobase_share_mutex; +/** to force correct commit order in binlog */ +static pthread_mutex_t prepare_commit_mutex; +static ulong commit_threads = 0; +static pthread_mutex_t commit_threads_m; +static pthread_cond_t commit_cond; +static pthread_mutex_t commit_cond_m; +static bool innodb_inited = 0; /* This needs to exist until the query cache callback is removed or learns to pass hton. */ -static handlerton *legacy_innodb_hton; - -/*-----------------------------------------------------------------*/ -/* These variables are used to implement (semi-)synchronous MySQL binlog -replication for InnoDB tables. */ - -pthread_cond_t innobase_repl_cond; /* Posix cond variable; - this variable is signaled - when enough binlog has been - sent to slave, so that a - waiting trx can return the - 'ok' message to the client - for a commit */ -pthread_mutex_t innobase_repl_cond_mutex; /* Posix cond variable mutex - that also protects the next - innobase_repl_... 
variables */ -uint innobase_repl_state; /* 1 if synchronous replication - is switched on and is working - ok; else 0 */ -uint innobase_repl_file_name_inited = 0; /* This is set to 1 when - innobase_repl_file_name - contains meaningful data */ -char* innobase_repl_file_name; /* The binlog name up to which - we have sent some binlog to - the slave */ -my_off_t innobase_repl_pos; /* The position in that file - up to which we have sent the - binlog to the slave */ -uint innobase_repl_n_wait_threads = 0; /* This tells how many - transactions currently are - waiting for the binlog to be - sent to the client */ -uint innobase_repl_wait_file_name_inited = 0; /* This is set to 1 - when we know the 'smallest' - wait position */ -char* innobase_repl_wait_file_name; /* NULL, or the 'smallest' - innobase_repl_file_name that - a transaction is waiting for */ -my_off_t innobase_repl_wait_pos; /* The smallest position in - that file that a trx is - waiting for: the trx can - proceed and send an 'ok' to - the client when MySQL has sent - the binlog up to this position - to the slave */ -/*-----------------------------------------------------------------*/ - - - -/* Store MySQL definition of 'byte': in Linux it is char while InnoDB -uses unsigned char; the header univ.i which we include next defines -'byte' as a macro which expands to 'unsigned char' */ - -typedef byte mysql_byte; +static handlerton *innodb_hton_ptr; #define INSIDE_HA_INNOBASE_CC @@ -139,48 +92,45 @@ extern "C" { #include "../storage/innobase/include/ha_prototypes.h" } -#define HA_INNOBASE_ROWS_IN_TABLE 10000 /* to get optimization right */ -#define HA_INNOBASE_RANGE_COUNT 100 - -ulong innobase_large_page_size = 0; +static const long AUTOINC_OLD_STYLE_LOCKING = 0; +static const long AUTOINC_NEW_STYLE_LOCKING = 1; +static const long AUTOINC_NO_LOCKING = 2; -/* The default values for the following, type long or longlong, start-up -parameters are declared in mysqld.cc: */ - -long innobase_mirrored_log_groups, innobase_log_files_in_group, +static long innobase_mirrored_log_groups, innobase_log_files_in_group, innobase_log_buffer_size, innobase_buffer_pool_awe_mem_mb, innobase_additional_mem_pool_size, innobase_file_io_threads, innobase_lock_wait_timeout, innobase_force_recovery, - innobase_open_files; + innobase_open_files, innobase_autoinc_lock_mode; -longlong innobase_buffer_pool_size, innobase_log_file_size; +static long long innobase_buffer_pool_size, innobase_log_file_size; /* The default values for the following char* start-up parameters are determined in innobase_init below: */ -char* innobase_data_home_dir = NULL; -char* innobase_data_file_path = NULL; -char* innobase_log_group_home_dir = NULL; -char* innobase_log_arch_dir = NULL;/* unused */ +static char* innobase_data_home_dir = NULL; +static char* innobase_data_file_path = NULL; +static char* innobase_log_group_home_dir = NULL; /* The following has a misleading name: starting from 4.0.5, this also affects Windows: */ -char* innobase_unix_file_flush_method = NULL; +static char* innobase_unix_file_flush_method = NULL; /* Below we have boolean-valued start-up parameters, and their default values */ -ulong innobase_fast_shutdown = 1; -my_bool innobase_log_archive = FALSE;/* unused */ -my_bool innobase_use_doublewrite = TRUE; -my_bool innobase_use_checksums = TRUE; -my_bool innobase_use_large_pages = FALSE; -my_bool innobase_use_native_aio = FALSE; -my_bool innobase_file_per_table = FALSE; -my_bool innobase_locks_unsafe_for_binlog = FALSE; -my_bool innobase_rollback_on_timeout = FALSE; 
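(The AUTOINC_OLD_STYLE_LOCKING, AUTOINC_NEW_STYLE_LOCKING and AUTOINC_NO_LOCKING constants introduced above back the new innobase_autoinc_lock_mode start-up parameter. The C sketch below only illustrates the intent of the three modes; decide_autoinc_table_lock() is a hypothetical helper, not the dispatch ha_innodb.cc actually performs.)

    /* Hypothetical helper, for illustration only: returns nonzero when
    a table-level AUTO-INC lock should be taken for an insert. */
    static int
    decide_autoinc_table_lock(long autoinc_lock_mode, int n_rows_known)
    {
        switch (autoinc_lock_mode) {
        case 0:  /* AUTOINC_OLD_STYLE_LOCKING: always lock the table;
                 the lock is held to the end of the statement */
            return(1);
        case 1:  /* AUTOINC_NEW_STYLE_LOCKING: lock only when the row
                 count is unknown up front, e.g. INSERT ... SELECT */
            return(!n_rows_known);
        default: /* AUTOINC_NO_LOCKING: never lock; auto-increment
                 values of concurrent inserts may interleave */
            return(0);
        }
    }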
-my_bool innobase_create_status_file = FALSE; +static ulong innobase_fast_shutdown = 1; +#ifdef UNIV_LOG_ARCHIVE +static my_bool innobase_log_archive = FALSE; +static char* innobase_log_arch_dir = NULL; +#endif /* UNIV_LOG_ARCHIVE */ +static my_bool innobase_use_doublewrite = TRUE; +static my_bool innobase_use_checksums = TRUE; +static my_bool innobase_file_per_table = FALSE; +static my_bool innobase_locks_unsafe_for_binlog = FALSE; +static my_bool innobase_rollback_on_timeout = FALSE; +static my_bool innobase_create_status_file = FALSE; +static my_bool innobase_stats_on_metadata = TRUE; -static char *internal_innobase_data_file_path = NULL; +static char* internal_innobase_data_file_path = NULL; /* The following counter is used to convey information to InnoDB about server activity: in selects it is not sensible to call @@ -188,7 +138,7 @@ srv_active_wake_master_thread after each fetch or search, we only do it every INNOBASE_WAKE_INTERVAL'th step. */ #define INNOBASE_WAKE_INTERVAL 32 -ulong innobase_active_counter = 0; +static ulong innobase_active_counter = 0; static HASH innobase_open_tables; @@ -196,7 +146,7 @@ static HASH innobase_open_tables; bool nw_panic = FALSE; #endif -static mysql_byte* innobase_get_key(INNOBASE_SHARE *share,uint *length, +static uchar* innobase_get_key(INNOBASE_SHARE *share, size_t *length, my_bool not_used __attribute__((unused))); static INNOBASE_SHARE *get_share(const char *table_name); static void free_share(INNOBASE_SHARE *share); @@ -214,6 +164,17 @@ static handler *innobase_create_handler(handlerton *hton, static const char innobase_hton_name[]= "InnoDB"; + +static MYSQL_THDVAR_BOOL(support_xa, PLUGIN_VAR_OPCMDARG, + "Enable InnoDB support for the XA two-phase commit", + /* check_func */ NULL, /* update_func */ NULL, + /* default */ TRUE); + +static MYSQL_THDVAR_BOOL(table_locks, PLUGIN_VAR_OPCMDARG, + "Enable InnoDB locking in LOCK TABLES", + /* check_func */ NULL, /* update_func */ NULL, + /* default */ TRUE); + static handler *innobase_create_handler(handlerton *hton, TABLE_SHARE *table, MEM_ROOT *mem_root) @@ -221,16 +182,149 @@ static handler *innobase_create_handler(handlerton *hton, return new (mem_root) ha_innobase(hton, table); } +/*********************************************************************** +This function is used to prepare X/Open XA distributed transaction */ +static +int +innobase_xa_prepare( +/*================*/ + /* out: 0 or error number */ + handlerton* hton, + THD* thd, /* in: handle to the MySQL thread of the user + whose XA transaction should be prepared */ + bool all); /* in: TRUE - commit transaction + FALSE - the current SQL statement ended */ +/*********************************************************************** +This function is used to recover X/Open XA distributed transactions */ +static +int +innobase_xa_recover( +/*================*/ + /* out: number of prepared transactions + stored in xid_list */ + handlerton* hton, + XID* xid_list, /* in/out: prepared transactions */ + uint len); /* in: number of slots in xid_list */ +/*********************************************************************** +This function is used to commit one X/Open XA distributed transaction +which is in the prepared state */ +static +int +innobase_commit_by_xid( +/*===================*/ + /* out: 0 or error number */ + handlerton* hton, + XID* xid); /* in: X/Open XA transaction identification */ +/*********************************************************************** +This function is used to rollback one X/Open XA distributed transaction 
+which is in the prepared state */ +static +int +innobase_rollback_by_xid( +/*=====================*/ + /* out: 0 or error number */ + handlerton* hton, + XID *xid); /* in: X/Open XA transaction identification */ +/*********************************************************************** +Create a consistent view for a cursor based on current transaction +which is created if the corresponding MySQL thread still lacks one. +This consistent view is then used inside of MySQL when accessing records +using a cursor. */ +static +void* +innobase_create_cursor_view( +/*========================*/ + /* out: pointer to cursor view or NULL */ + handlerton* hton, /* in: innobase hton */ + THD* thd); /* in: user thread handle */ +/*********************************************************************** +Set the given consistent cursor view to a transaction which is created +if the corresponding MySQL thread still lacks one. If the given +consistent cursor view is NULL global read view of a transaction is +restored to a transaction read view. */ +static +void +innobase_set_cursor_view( +/*=====================*/ + handlerton* hton, + THD* thd, /* in: user thread handle */ + void* curview);/* in: Consistent cursor view to be set */ +/*********************************************************************** +Close the given consistent cursor view of a transaction and restore +global read view to a transaction read view. Transaction is created if the +corresponding MySQL thread still lacks one. */ +static +void +innobase_close_cursor_view( +/*=======================*/ + handlerton* hton, + THD* thd, /* in: user thread handle */ + void* curview);/* in: Consistent read view to be closed */ +/********************************************************************* +Removes all tables in the named database inside InnoDB. */ +static +void +innobase_drop_database( +/*===================*/ + /* out: error number */ + handlerton* hton, /* in: handlerton of Innodb */ + char* path); /* in: database path; inside InnoDB the name + of the last directory in the path is used as + the database name: for example, in 'mysql/data/test' + the database name is 'test' */ +/*********************************************************************** +Closes an InnoDB database. */ +static +int +innobase_end(handlerton *hton, ha_panic_function type); /********************************************************************* -Commits a transaction in an InnoDB database. */ +Creates an InnoDB transaction struct for the thd if it does not yet have one. +Starts a new InnoDB transaction if a transaction is not yet started. And +assigns a new snapshot for a consistent read if the transaction does not yet +have one. */ +static +int +innobase_start_trx_and_assign_read_view( +/*====================================*/ + /* out: 0 */ + handlerton* hton, /* in: Innodb handlerton */ + THD* thd); /* in: MySQL thread handle of the user for whom + the transaction should be committed */ +/******************************************************************** +Flushes InnoDB logs to disk and makes a checkpoint. Really, a commit flushes +the logs, and the name of this function should be innobase_checkpoint. */ +static +bool +innobase_flush_logs( +/*================*/ + /* out: TRUE if error */ + handlerton* hton); /* in: InnoDB handlerton */ +/**************************************************************************** +Implements the SHOW INNODB STATUS command. Sends the output of the InnoDB +Monitor to the client. 
*/ +static +bool +innodb_show_status( +/*===============*/ + handlerton* hton, /* in: the innodb handlerton */ + THD* thd, /* in: the MySQL query thread of the caller */ + stat_print_fn *stat_print); +static +bool innobase_show_status(handlerton *hton, THD* thd, + stat_print_fn* stat_print, + enum ha_stat_type stat_type); + +/********************************************************************* +Commits a transaction in an InnoDB database. */ +static void innobase_commit_low( /*================*/ trx_t* trx); /* in: transaction handle */ -SHOW_VAR innodb_status_variables[]= { +static SHOW_VAR innodb_status_variables[]= { {"buffer_pool_pages_data", (char*) &export_vars.innodb_buffer_pool_pages_data, SHOW_LONG}, {"buffer_pool_pages_dirty", @@ -323,6 +417,22 @@ SHOW_VAR innodb_status_variables[]= { /* General functions */ /********************************************************************** +Returns true if the thread is the replication thread on the slave +server. Used in srv_conc_enter_innodb() to determine if the thread +should be allowed to enter InnoDB - the replication thread is treated +differently than other threads. Also used in +srv_conc_force_exit_innodb(). */ +extern "C" +ibool +thd_is_replication_slave_thread( +/*============================*/ + /* out: true if thd is the replication thread */ + void* thd) /* in: thread handle (THD*) */ +{ + return((ibool) thd_slave_thread((THD*) thd)); +} + +/********************************************************************** Save some CPU by testing the value of srv_thread_concurrency in inline functions. */ inline @@ -378,25 +488,56 @@ innobase_release_stat_resources( } } +/********************************************************************** +Returns true if the transaction this thread is processing has edited +non-transactional tables. Used by the deadlock detector when deciding +which transaction to rollback in case of a deadlock - we try to avoid +rolling back transactions that have edited non-transactional tables. */ +extern "C" +ibool +thd_has_edited_nontrans_tables( +/*===========================*/ + /* out: true if non-transactional tables have + been edited */ + void* thd) /* in: thread handle (THD*) */ +{ + return((ibool) thd_non_transactional_update((THD*) thd)); +} + +/************************************************************************ +Obtain the InnoDB transaction of a MySQL thread. */ +inline +trx_t*& +thd_to_trx( +/*=======*/ + /* out: reference to transaction pointer */ + THD* thd) /* in: MySQL thread */ +{ + return(*(trx_t**) thd_ha_data(thd, innodb_hton_ptr)); +} + /************************************************************************ Call this function when mysqld passes control to the client. That is to avoid deadlocks on the adaptive hash S-latch possibly held by thd. For more documentation, see handler.cc. 
*/ - +static int innobase_release_temporary_latches( /*===============================*/ - handlerton *hton, - THD *thd) + /* out: 0 */ + handlerton* hton, /* in: handlerton */ + THD* thd) /* in: MySQL thread */ { trx_t* trx; + DBUG_ASSERT(hton == innodb_hton_ptr); + if (!innodb_inited) { return 0; } - trx = (trx_t*) thd->ha_data[hton->slot]; + trx = thd_to_trx(thd); if (trx) { innobase_release_stat_resources(trx); @@ -458,9 +599,7 @@ convert_error_code_to_mysql( tell it also to MySQL so that MySQL knows to empty the cached binlog for this transaction */ - if (thd) { - ha_rollback(thd); - } + thd_mark_transaction_to_rollback(thd, TRUE); return(HA_ERR_LOCK_DEADLOCK); @@ -470,9 +609,8 @@ latest SQL statement in a lock wait timeout. Previously, we rolled back the whole transaction. */ - if (thd && row_rollback_on_timeout) { - ha_rollback(thd); - } + thd_mark_transaction_to_rollback(thd, + (bool)row_rollback_on_timeout); return(HA_ERR_LOCK_WAIT_TIMEOUT); @@ -507,7 +645,7 @@ } else if (error == (int) DB_TABLE_NOT_FOUND) { - return(HA_ERR_KEY_NOT_FOUND); + return(HA_ERR_NO_SUCH_TABLE); } else if (error == (int) DB_TOO_BIG_RECORD) { @@ -524,11 +662,23 @@ tell it also to MySQL so that MySQL knows to empty the cached binlog for this transaction */ - if (thd) { - ha_rollback(thd); - } + thd_mark_transaction_to_rollback(thd, TRUE); return(HA_ERR_LOCK_TABLE_FULL); + } else if (error == DB_TOO_MANY_CONCURRENT_TRXS) { + + /* Once MySQL adds the appropriate code to errmsg.txt, then + we can get rid of this #ifdef. NOTE: The code checked by + the #ifdef is the suggested name for the error condition + and the actual error code name could very well be different. + This will require some monitoring, i.e. the status + of this request on our part.*/ +#ifdef ER_TOO_MANY_CONCURRENT_TRXS + return(ER_TOO_MANY_CONCURRENT_TRXS); +#else + return(HA_ERR_RECORD_FILE_FULL); +#endif + } else { return(-1); // Unknown error } @@ -576,78 +726,12 @@ innobase_mysql_print_thd( uint max_query_len) /* in: max query length to print, or 0 to use the default max length */ { - const THD* thd; - const Security_context *sctx; - const char* s; - - thd = (const THD*) input_thd; - /* We probably want to have original user as part of debug output. */ - sctx = &thd->main_security_ctx; - - - fprintf(f, "MySQL thread id %lu, query id %lu", - thd->thread_id, (ulong) thd->query_id); - if (sctx->host) { - putc(' ', f); - fputs(sctx->host, f); - } - - if (sctx->ip) { - putc(' ', f); - fputs(sctx->ip, f); - } - - if (sctx->user) { - putc(' ', f); - fputs(sctx->user, f); - } - - if ((s = thd->proc_info)) { - putc(' ', f); - fputs(s, f); - } - - if ((s = thd->query)) { - /* 3100 is chosen because currently 3000 is the maximum - max_query_len we ever give this. */ - char buf[3100]; - uint len; - - /* If buf is too small, we dynamically allocate storage - in this. */ - char* dyn_str = NULL; - - /* Points to buf or dyn_str.
*/ - char* str = buf; - - if (max_query_len == 0) { - /* ADDITIONAL SAFETY: the default is to print at - most 300 chars to reduce the probability of a - seg fault if there is a race in - thd->query_length in MySQL; after May 14, 2004 - probably no race any more, but better be - safe */ - max_query_len = 300; - } - - len = min(thd->query_length, max_query_len); - - if (len > (sizeof(buf) - 1)) { - dyn_str = my_malloc(len + 1, MYF(0)); - str = dyn_str; - } - - /* Use strmake to reduce the timeframe for a race, - compared to fwrite() */ - len = (uint) (strmake(str, s, len) - str); - putc('\n', f); - fwrite(str, 1, len, f); - - if (dyn_str) { - my_free(dyn_str, MYF(0)); - } - } + THD* thd; + char buffer[1024]; + thd = (THD*) input_thd; + fputs(thd_security_context(thd, buffer, sizeof(buffer), + max_query_len), f); putc('\n', f); } @@ -655,7 +739,7 @@ innobase_mysql_print_thd( Get the variable length bounds of the given character set. NOTE that the exact prototype of this function has to be in -/innobase/data/data0type.ic! */ +/innobase/include/data0type.ic! */ extern "C" void innobase_get_cset_width( @@ -694,8 +778,8 @@ innobase_convert_from_table_id( { uint errors; - strconvert(current_thd->charset(), from, - &my_charset_filename, to, len, &errors); + strconvert(thd_charset(current_thd), from, + &my_charset_filename, to, (uint) len, &errors); } /********************************************************************** @@ -713,8 +797,8 @@ innobase_convert_from_id( { uint errors; - strconvert(current_thd->charset(), from, - system_charset_info, to, len, &errors); + strconvert(thd_charset(current_thd), from, + system_charset_info, to, (uint) len, &errors); } /********************************************************************** @@ -776,7 +860,7 @@ innobase_get_charset( /* out: connection character set */ void* mysql_thd) /* in: MySQL thread handle */ { - return(((THD*) mysql_thd)->charset()); + return(thd_charset((THD*) mysql_thd)); } /************************************************************************* @@ -787,22 +871,9 @@ innobase_mysql_tmpfile(void) /*========================*/ /* out: temporary file descriptor, or < 0 on error */ { - char filename[FN_REFLEN]; int fd2 = -1; - File fd = create_temp_file(filename, mysql_tmpdir, "ib", -#ifdef __WIN__ - O_BINARY | O_TRUNC | O_SEQUENTIAL | - O_TEMPORARY | O_SHORT_LIVED | -#endif /* __WIN__ */ - O_CREAT | O_EXCL | O_RDWR, - MYF(MY_WME)); + File fd = mysql_tmpfile("ib"); if (fd >= 0) { -#ifndef __WIN__ - /* On Windows, open files cannot be removed, but files can be - created with the O_TEMPORARY flag to the same effect - ("delete on close"). */ - unlink(filename); -#endif /* !__WIN__ */ /* Copy the file descriptor, so that the additional resources allocated by create_temp_file() can be freed by invoking my_close(). 
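(The innobase_mysql_tmpfile() rewrite above drops the hand-rolled create_temp_file() call in favour of mysql_tmpfile("ib") and keeps only the descriptor duplication. The POSIX sketch below shows the underlying pattern, with tmpfile() standing in for mysql_tmpfile(), which additionally registers its descriptor in MySQL's own bookkeeping: the caller receives a plain dup()ed descriptor, and the wrapper's handle is closed at once so its resources are released while the duplicated descriptor keeps the unlinked file alive.)

    #include <stdio.h>
    #include <unistd.h>

    /* Sketch only: tmpfile() stands in for mysql_tmpfile("ib"). */
    static int
    tmpfile_fd_sketch(void)
    {
        FILE*  f = tmpfile();  /* anonymous, already-unlinked file */
        int    fd2 = -1;

        if (f != NULL) {
            /* Hand the caller an untracked duplicate... */
            fd2 = dup(fileno(f));
            /* ...and free the wrapper's resources; the dup()ed
            descriptor keeps the file data reachable. */
            fclose(f);
        }

        return(fd2);  /* < 0 on error, as in innobase_mysql_tmpfile() */
    }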
@@ -817,7 +888,7 @@ innobase_mysql_tmpfile(void) my_errno=errno; my_error(EE_OUT_OF_FILERESOURCES, MYF(ME_BELL+ME_WAITTANG), - filename, my_errno); + "ib*", my_errno); } my_close(fd, MYF(MY_WME)); } @@ -839,8 +910,9 @@ innobase_convert_string( CHARSET_INFO* from_cs, uint* errors) { - return(copy_and_convert((char*)to, to_length, to_cs, - (const char*)from, from_length, from_cs, errors)); + return(copy_and_convert((char*)to, (uint32) to_length, to_cs, + (const char*)from, (uint32) from_length, from_cs, + errors)); } /************************************************************************* @@ -852,43 +924,37 @@ trx_t* check_trx_exists( /*=============*/ /* out: InnoDB transaction handle */ - handlerton* hton, /* in: handlerton for innodb */ THD* thd) /* in: user thread handle */ { - trx_t* trx; + trx_t*& trx = thd_to_trx(thd); ut_ad(thd == current_thd); - trx = (trx_t*) thd->ha_data[hton->slot]; - if (trx == NULL) { DBUG_ASSERT(thd != NULL); trx = trx_allocate_for_mysql(); trx->mysql_thd = thd; - trx->mysql_query_str = &(thd->query); - trx->active_trans = 0; + trx->mysql_query_str = thd_query(thd); /* Update the info whether we should skip XA steps that eat CPU time */ - trx->support_xa = (ibool)(thd->variables.innodb_support_xa); - - thd->ha_data[hton->slot] = trx; + trx->support_xa = THDVAR(thd, support_xa); } else { if (trx->magic_n != TRX_MAGIC_N) { mem_analyze_corruption(trx); - ut_a(0); + ut_error; } } - if (thd->options & OPTION_NO_FOREIGN_KEY_CHECKS) { + if (thd_test_options(thd, OPTION_NO_FOREIGN_KEY_CHECKS)) { trx->check_foreigns = FALSE; } else { trx->check_foreigns = TRUE; } - if (thd->options & OPTION_RELAXED_UNIQUE_CHECKS) { + if (thd_test_options(thd, OPTION_RELAXED_UNIQUE_CHECKS)) { trx->check_unique_secondary = FALSE; } else { trx->check_unique_secondary = TRUE; @@ -909,6 +975,7 @@ ha_innobase::ha_innobase(handlerton *hton, TABLE_SHARE *table_arg) HA_CAN_SQL_HANDLER | HA_PRIMARY_KEY_REQUIRED_FOR_POSITION | HA_PRIMARY_KEY_IN_READ_INDEX | + HA_BINLOG_ROW_CAPABLE | HA_CAN_GEOMETRY | HA_PARTIAL_COLUMN_READ | HA_TABLE_SCAN_ON_INDEX), start_of_scan(0), @@ -926,10 +993,9 @@ ha_innobase::update_thd( /* out: 0 or error code */ THD* thd) /* in: thd to use the handle */ { - row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt; trx_t* trx; - trx = check_trx_exists(ht, thd); + trx = check_trx_exists(thd); if (prebuilt->trx != trx) { @@ -976,7 +1042,7 @@ innobase_register_trx_and_stmt( innobase_register_stmt(hton, thd); - if (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) { + if (thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) { /* No autocommit mode, register for a transaction */ trans_register_ha(thd, TRUE, hton); @@ -1044,7 +1110,7 @@ holding any InnoDB semaphores. The calling thread is holding the query cache mutex, and this function will reserver the InnoDB kernel mutex. Thus, the 'rank' in sync0sync.h of the MySQL query cache mutex is above the InnoDB kernel mutex. */ - +static my_bool innobase_query_caching_of_table_permitted( /*======================================*/ @@ -1068,14 +1134,15 @@ innobase_query_caching_of_table_permitted( ut_a(full_name_len < 999); - if (thd->variables.tx_isolation == ISO_SERIALIZABLE) { + trx = check_trx_exists(thd); + + if (trx->isolation_level == TRX_ISO_SERIALIZABLE) { /* In the SERIALIZABLE mode we add LOCK IN SHARE MODE to every plain SELECT if AUTOCOMMIT is not on. 
*/ return((my_bool)FALSE); } - trx = check_trx_exists(legacy_innodb_hton, thd); if (trx->has_search_latch) { ut_print_timestamp(stderr); sql_print_error("The calling thread is holding the adaptive " @@ -1089,7 +1156,7 @@ innobase_query_caching_of_table_permitted( innobase_release_stat_resources(trx); - if (!(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))) { + if (!thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) { is_autocommit = TRUE; } else { @@ -1134,7 +1201,7 @@ innobase_query_caching_of_table_permitted( if (trx->active_trans == 0) { - innobase_register_trx_and_stmt(legacy_innodb_hton, thd); + innobase_register_trx_and_stmt(innodb_hton_ptr, thd); trx->active_trans = 1; } @@ -1172,10 +1239,10 @@ innobase_invalidate_query_cache( /* Argument TRUE below means we are using transactions */ #ifdef HAVE_QUERY_CACHE - query_cache.invalidate((THD*)(trx->mysql_thd), - (const char*)full_name, - (uint32)full_name_len, - TRUE); + mysql_query_cache_invalidate4((THD*) trx->mysql_thd, + (const char*) full_name, + (uint32) full_name_len, + TRUE); #endif } @@ -1203,12 +1270,12 @@ innobase_print_identifier( output strings buffers must not be shared. The function only produces more output when the name contains other characters than [0-9A-Z_a-z]. */ - char* temp_name = my_malloc(namelen + 1, MYF(MY_WME)); - uint qnamelen = namelen - + (1 + sizeof srv_mysql50_table_name_prefix); + char* temp_name = (char*) my_malloc((uint) namelen + 1, MYF(MY_WME)); + uint qnamelen = (uint) (namelen + + (1 + sizeof srv_mysql50_table_name_prefix)); if (temp_name) { - qname = my_malloc(qnamelen, MYF(MY_WME)); + qname = (char*) my_malloc(qnamelen, MYF(MY_WME)); if (qname) { memcpy(temp_name, name, namelen); temp_name[namelen] = 0; @@ -1255,7 +1322,20 @@ trx_is_interrupted( /* out: TRUE if interrupted */ trx_t* trx) /* in: transaction */ { - return(trx && trx->mysql_thd && ((THD*) trx->mysql_thd)->killed); + return(trx && trx->mysql_thd && thd_killed((THD*) trx->mysql_thd)); +} + +/****************************************************************** +Resets some fields of a prebuilt struct. The template is used in fast +retrieval of just those column values MySQL needs in its processing. */ +static +void +reset_template( +/*===========*/ + row_prebuilt_t* prebuilt) /* in/out: prebuilt struct */ +{ + prebuilt->keep_other_fields_on_keyread = 0; + prebuilt->read_just_key = 0; } /********************************************************************* @@ -1269,20 +1349,16 @@ void ha_innobase::init_table_handle_for_HANDLER(void) /*============================================*/ { - row_prebuilt_t* prebuilt; - /* If current thd does not yet have a trx struct, create one. If the current handle does not yet have a prebuilt struct, create one. Update the trx pointers in the prebuilt struct. Normally this operation is done in external_lock. */ - update_thd(current_thd); + update_thd(ha_thd()); /* Initialize the prebuilt struct much like it would be inited in external_lock */ - prebuilt = (row_prebuilt_t*)innobase_prebuilt; - innobase_release_stat_resources(prebuilt->trx); /* If the transaction is not started yet, start it */ @@ -1297,7 +1373,7 @@ ha_innobase::init_table_handle_for_HANDLER(void) if (prebuilt->trx->active_trans == 0) { - innobase_register_trx_and_stmt(ht, current_thd); + innobase_register_trx_and_stmt(ht, user_thd); prebuilt->trx->active_trans = 1; } @@ -1320,19 +1396,18 @@ ha_innobase::init_table_handle_for_HANDLER(void) /* We want always to fetch all columns in the whole row? Or do we???? 
*/ - prebuilt->read_just_key = FALSE; - prebuilt->used_in_HANDLER = TRUE; - - prebuilt->keep_other_fields_on_keyread = FALSE; + reset_template(prebuilt); } /************************************************************************* Opens an InnoDB database. */ - +static int -innobase_init(void *p) -/*===============*/ +innobase_init( +/*==========*/ + /* out: 0 on success, error code on failure */ + void *p) /* in: InnoDB handlerton */ { static char current_dir[3]; /* Set if using current lib */ int err; @@ -1341,9 +1416,9 @@ innobase_init(void *p) DBUG_ENTER("innobase_init"); handlerton *innobase_hton= (handlerton *)p; - legacy_innodb_hton= innobase_hton; + innodb_hton_ptr = innobase_hton; - innobase_hton->state=have_innodb; + innobase_hton->state = SHOW_OPTION_YES; innobase_hton->db_type= DB_TYPE_INNODB; innobase_hton->savepoint_offset=sizeof(trx_named_savept_t); innobase_hton->close_connection=innobase_close_connection; @@ -1368,9 +1443,6 @@ innobase_init(void *p) innobase_hton->flags=HTON_NO_FLAGS; innobase_hton->release_temporary_latches=innobase_release_temporary_latches; - if (have_innodb != SHOW_OPTION_YES) - DBUG_RETURN(0); // nothing else to do - ut_a(DATA_MYSQL_TRUE_VARCHAR == (ulint)MYSQL_TYPE_VARCHAR); #ifdef UNIV_DEBUG @@ -1526,10 +1598,7 @@ innobase_init(void *p) changes the value so that it becomes the number of database pages. */ if (innobase_buffer_pool_awe_mem_mb == 0) { - /* Careful here: we first convert the signed long int to ulint - and only after that divide */ - - srv_pool_size = ((ulint) innobase_buffer_pool_size) / 1024; + srv_pool_size = (ulint)(innobase_buffer_pool_size / 1024); } else { srv_use_awe = TRUE; srv_pool_size = (ulint) @@ -1552,8 +1621,10 @@ innobase_init(void *p) srv_use_doublewrite_buf = (ibool) innobase_use_doublewrite; srv_use_checksums = (ibool) innobase_use_checksums; - os_use_large_pages = (ibool) innobase_use_large_pages; - os_large_page_size = (ulint) innobase_large_page_size; +#ifdef HAVE_LARGE_PAGES + if ((os_use_large_pages = (ibool) my_use_large_pages)) + os_large_page_size = (ulint) opt_large_page_size; +#endif row_rollback_on_timeout = (ibool) innobase_rollback_on_timeout; @@ -1563,6 +1634,8 @@ innobase_init(void *p) srv_max_n_open_files = (ulint) innobase_open_files; srv_innodb_status = (ibool) innobase_create_status_file; + srv_stats_on_metadata = (ibool) innobase_stats_on_metadata; + srv_print_verbose_log = mysqld_embedded ? 0 : 1; /* Store the default charset-collation number of this MySQL @@ -1610,13 +1683,12 @@ innobase_init(void *p) DBUG_RETURN(FALSE); error: - have_innodb= SHOW_OPTION_DISABLED; // If we couldn't use handler DBUG_RETURN(TRUE); } /*********************************************************************** Closes an InnoDB database. */ - +static int innobase_end(handlerton *hton, ha_panic_function type) /*==============*/ @@ -1654,7 +1726,7 @@ innobase_end(handlerton *hton, ha_panic_function type) /******************************************************************** Flushes InnoDB logs to disk and makes a checkpoint. Really, a commit flushes the logs, and the name of this function should be innobase_checkpoint. */ - +static bool innobase_flush_logs(handlerton *hton) /*=====================*/ @@ -1671,7 +1743,7 @@ innobase_flush_logs(handlerton *hton) /********************************************************************* Commits a transaction in an InnoDB database. 
*/ - +static void innobase_commit_low( /*================*/ @@ -1690,7 +1762,7 @@ Creates an InnoDB transaction struct for the thd if it does not yet have one. Starts a new InnoDB transaction if a transaction is not yet started. And assigns a new snapshot for a consistent read if the transaction does not yet have one. */ - +static int innobase_start_trx_and_assign_read_view( /*====================================*/ @@ -1705,7 +1777,7 @@ innobase_start_trx_and_assign_read_view( /* Create a new trx struct for thd, if it does not yet have one */ - trx = check_trx_exists(hton, thd); + trx = check_trx_exists(thd); /* This is just to play safe: release a possible FIFO ticket and search latch. Since we will reserve the kernel mutex, we have to @@ -1750,17 +1822,16 @@ innobase_commit( DBUG_ENTER("innobase_commit"); DBUG_PRINT("trans", ("ending transaction")); - trx = check_trx_exists(hton, thd); + trx = check_trx_exists(thd); /* Update the info whether we should skip XA steps that eat CPU time */ - trx->support_xa = (ibool)(thd->variables.innodb_support_xa); + trx->support_xa = THDVAR(thd, support_xa); - /* Release a possible FIFO ticket and search latch. Since we will - reserve the kernel mutex, we have to release the search system latch - first to obey the latching order. */ + /* Since we will reserve the kernel mutex, we have to release + the search system latch first to obey the latching order. */ if (trx->has_search_latch) { - trx_search_latch_release_if_reserved(trx); + trx_search_latch_release_if_reserved(trx); } /* The flag trx->active_trans is set to 1 in @@ -1785,7 +1856,7 @@ innobase_commit( " trx->conc_state != TRX_NOT_STARTED"); } if (all - || (!(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)))) { + || (!thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))) { /* We were instructed to commit the whole transaction, or this is an SQL statement end and autocommit is on */ @@ -1810,9 +1881,8 @@ retry: } } - trx->mysql_log_file_name = mysql_bin_log.get_log_fname(); - trx->mysql_log_offset = - (ib_longlong)mysql_bin_log.get_log_file()->pos_in_file; + trx->mysql_log_file_name = mysql_bin_log_file_name(); + trx->mysql_log_offset = (ib_longlong) mysql_bin_log_file_pos(); innobase_commit_low(trx); @@ -1834,12 +1904,11 @@ retry: /* We just mark the SQL statement ended and do not do a transaction commit */ - if (trx->auto_inc_lock) { - /* If we had reserved the auto-inc lock for some - table in this SQL statement we release it now */ + /* If we had reserved the auto-inc lock for some + table in this SQL statement we release it now */ + + row_unlock_table_autoinc_for_mysql(trx); - row_unlock_table_autoinc_for_mysql(trx); - } /* Store the current undo_no of the transaction so that we know where to roll back if we have to roll back the next SQL statement */ @@ -1847,127 +1916,25 @@ retry: trx_mark_sql_stat_end(trx); } - /* Tell the InnoDB server that there might be work for utility - threads: */ + trx->n_autoinc_rows = 0; /* Reset the number AUTO-INC rows required */ + if (trx->declared_to_be_inside_innodb) { - /* Release our possible ticket in the FIFO */ + /* Release our possible ticket in the FIFO */ - srv_conc_force_exit_innodb(trx); + srv_conc_force_exit_innodb(trx); } + + /* Tell the InnoDB server that there might be work for utility + threads: */ srv_active_wake_master_thread(); DBUG_RETURN(0); } -/* TODO: put the -MySQL-4.1 functionality back to 5.0. This is needed to get InnoDB Hot Backup -to work. 
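
The commit path above decides between ending the whole transaction and merely marking a statement boundary. A standalone model of that test; the option bits and the thd_test_options() stand-in below use illustrative values, not the server's:

    #include <cstdio>

    enum { OPTION_NOT_AUTOCOMMIT = 1 << 0, OPTION_BEGIN = 1 << 1 };

    static bool thd_test_options(unsigned thd_options, unsigned mask)
    {
            return (thd_options & mask) != 0;
    }

    /* Mirrors the test in innobase_commit(): commit the whole
    transaction when asked for a full commit, or when autocommit is
    effectively on; otherwise only the SQL statement is ended. */
    static bool commits_whole_transaction(bool all, unsigned thd_options)
    {
            return all
                    || !thd_test_options(thd_options,
                                         OPTION_NOT_AUTOCOMMIT
                                         | OPTION_BEGIN);
    }

    int main()
    {
            printf("%d\n", commits_whole_transaction(false, 0));
            printf("%d\n", commits_whole_transaction(false, OPTION_BEGIN));
            printf("%d\n", commits_whole_transaction(true, OPTION_BEGIN));
            return 0;
    }
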
*/ - -/********************************************************************* -This is called when MySQL writes the binlog entry for the current -transaction. Writes to the InnoDB tablespace info which tells where the -MySQL binlog entry for the current transaction ended. Also commits the -transaction inside InnoDB but does NOT flush InnoDB log files to disk. -To flush you have to call innobase_commit_complete(). We have separated -flushing to eliminate the bottleneck of LOCK_log in log.cc which disabled -InnoDB's group commit capability. */ - -int -innobase_report_binlog_offset_and_commit( -/*=====================================*/ - /* out: 0 */ - handlerton *hton, /* in: Innodb handlerton */ - THD* thd, /* in: user thread */ - void* trx_handle, /* in: InnoDB trx handle */ - char* log_file_name, /* in: latest binlog file name */ - my_off_t end_offset) /* in: the offset in the binlog file - up to which we wrote */ -{ - trx_t* trx; - - trx = (trx_t*)trx_handle; - - ut_a(trx != NULL); - - trx->mysql_log_file_name = log_file_name; - trx->mysql_log_offset = (ib_longlong)end_offset; - - trx->flush_log_later = TRUE; - - innobase_commit(hton, thd, TRUE); - - trx->flush_log_later = FALSE; - - return(0); -} - -#if 0 -/*********************************************************************** -This function stores the binlog offset and flushes logs. */ - -void -innobase_store_binlog_offset_and_flush_log( -/*=======================================*/ - char* binlog_name, /* in: binlog name */ - longlong offset) /* in: binlog offset */ -{ - mtr_t mtr; - - assert(binlog_name != NULL); - - /* Start a mini-transaction */ - mtr_start_noninline(&mtr); - - /* Update the latest MySQL binlog name and offset info - in trx sys header */ - - trx_sys_update_mysql_binlog_offset( - binlog_name, - offset, - TRX_SYS_MYSQL_LOG_INFO, &mtr); - - /* Commits the mini-transaction */ - mtr_commit(&mtr); - - /* Synchronous flush of the log buffer to disk */ - log_buffer_flush_to_disk(); -} -#endif - -/********************************************************************* -This is called after MySQL has written the binlog entry for the current -transaction. Flushes the InnoDB log files to disk if required. */ - -int -innobase_commit_complete( -/*=====================*/ - /* out: 0 */ - handlerton *hton, /* in: Innodb handlerton */ - THD* thd) /* in: user thread */ -{ - trx_t* trx; - - trx = (trx_t*) thd->ha_data[hton->slot]; - - if (trx && trx->active_trans) { - - trx->active_trans = 0; - - if (UNIV_UNLIKELY(srv_flush_log_at_trx_commit == 0)) { - - return(0); - } - - trx_commit_complete_for_mysql(trx); - } - - return(0); -} - /********************************************************************* Rolls back a transaction or the latest SQL statement. */ - -static int +static +int innobase_rollback( /*==============*/ /* out: 0 or error number */ @@ -1983,10 +1950,10 @@ innobase_rollback( DBUG_ENTER("innobase_rollback"); DBUG_PRINT("trans", ("aborting transaction")); - trx = check_trx_exists(hton, thd); + trx = check_trx_exists(thd); /* Update the info whether we should skip XA steps that eat CPU time */ - trx->support_xa = (ibool)(thd->variables.innodb_support_xa); + trx->support_xa = THDVAR(thd, support_xa); /* Release a possible FIFO ticket and search latch. 
Since we will reserve the kernel mutex, we have to release the search system latch @@ -1994,16 +1961,14 @@ innobase_rollback( innobase_release_stat_resources(trx); - if (trx->auto_inc_lock) { - /* If we had reserved the auto-inc lock for some table (if - we come here to roll back the latest SQL statement) we - release it now before a possibly lengthy rollback */ + /* If we had reserved the auto-inc lock for some table (if + we come here to roll back the latest SQL statement) we + release it now before a possibly lengthy rollback */ - row_unlock_table_autoinc_for_mysql(trx); - } + row_unlock_table_autoinc_for_mysql(trx); if (all - || (!(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)))) { + || !thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) { error = trx_rollback_for_mysql(trx); trx->active_trans = 0; @@ -2016,7 +1981,7 @@ innobase_rollback( /********************************************************************* Rolls back a transaction */ - +static int innobase_rollback_trx( /*==================*/ @@ -2034,13 +1999,11 @@ innobase_rollback_trx( innobase_release_stat_resources(trx); - if (trx->auto_inc_lock) { - /* If we had reserved the auto-inc lock for some table (if - we come here to roll back the latest SQL statement) we - release it now before a possibly lengthy rollback */ + /* If we had reserved the auto-inc lock for some table (if + we come here to roll back the latest SQL statement) we + release it now before a possibly lengthy rollback */ - row_unlock_table_autoinc_for_mysql(trx); - } + row_unlock_table_autoinc_for_mysql(trx); error = trx_rollback_for_mysql(trx); @@ -2049,8 +2012,8 @@ innobase_rollback_trx( /********************************************************************* Rolls back a transaction to a savepoint. */ - -static int +static +int innobase_rollback_to_savepoint( /*===========================*/ /* out: 0 if success, HA_ERR_NO_SAVEPOINT if @@ -2067,7 +2030,7 @@ innobase_rollback_to_savepoint( DBUG_ENTER("innobase_rollback_to_savepoint"); - trx = check_trx_exists(hton, thd); + trx = check_trx_exists(thd); /* Release a possible FIFO ticket and search latch. Since we will reserve the kernel mutex, we have to release the search system latch @@ -2103,7 +2066,7 @@ innobase_release_savepoint( DBUG_ENTER("innobase_release_savepoint"); - trx = check_trx_exists(hton, thd); + trx = check_trx_exists(thd); /* TODO: use provided savepoint data area to store savepoint data */ @@ -2135,10 +2098,12 @@ innobase_savepoint( (unless we are in sub-statement), so SQL layer ensures that this method is never called in such situation. */ - DBUG_ASSERT(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN) || +#ifdef MYSQL_SERVER /* plugins cannot access thd->in_sub_stmt */ + DBUG_ASSERT(thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN) || thd->in_sub_stmt); +#endif /* MYSQL_SERVER */ - trx = check_trx_exists(hton, thd); + trx = check_trx_exists(thd); /* Release a possible FIFO ticket and search latch. 
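
Both rollback paths above drop the `if (trx->auto_inc_lock)` guard and call row_unlock_table_autoinc_for_mysql() unconditionally, which only works if the callee itself is a no-op when no AUTO-INC lock is held. A sketch of that guard-in-callee idiom; the struct and function are stand-ins, not the real InnoDB definitions:

    #include <cstdio>

    struct trx_t {
            bool auto_inc_lock;
    };

    /* Guard moved inside the callee: callers no longer test
    trx->auto_inc_lock themselves, they just call unconditionally. */
    static void row_unlock_table_autoinc(trx_t* trx)
    {
            if (!trx->auto_inc_lock) {
                    return;  /* nothing reserved, nothing to release */
            }
            trx->auto_inc_lock = false;
            printf("AUTO-INC lock released\n");
    }

    int main()
    {
            trx_t trx = {true};
            row_unlock_table_autoinc(&trx);  /* releases */
            row_unlock_table_autoinc(&trx);  /* safe no-op */
            return 0;
    }

Hoisting the check into the callee keeps the rollback and commit paths branch-free at every call site.
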
Since we will reserve the kernel mutex, we have to release the search system latch @@ -2171,7 +2136,9 @@ innobase_close_connection( { trx_t* trx; - trx = (trx_t*)thd->ha_data[hton->slot]; + DBUG_ENTER("innobase_close_connection"); + DBUG_ASSERT(hton == innodb_hton_ptr); + trx = thd_to_trx(thd); ut_a(trx); @@ -2197,7 +2164,7 @@ innobase_close_connection( thr_local_free(trx->mysql_thread_id); trx_free_for_mysql(trx); - return(0); + DBUG_RETURN(0); } @@ -2212,8 +2179,6 @@ ha_innobase::get_row_type() const /*=============================*/ /* out: ROW_TYPE_REDUNDANT or ROW_TYPE_COMPACT */ { - row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt; - if (prebuilt && prebuilt->table) { if (dict_table_is_comp_noninline(prebuilt->table)) { return(ROW_TYPE_COMPACT); @@ -2225,6 +2190,21 @@ ha_innobase::get_row_type() const return(ROW_TYPE_NOT_USED); } + + +/******************************************************************** +Get the table flags to use for the statement. */ +handler::Table_flags +ha_innobase::table_flags() const +{ + /* Need to use tx_isolation here since table flags is (also) + called before prebuilt is inited. */ + ulong const tx_isolation = thd_tx_isolation(current_thd); + if (tx_isolation <= ISO_READ_COMMITTED) + return int_table_flags; + return int_table_flags | HA_BINLOG_STMT_CAPABLE; +} + /******************************************************************** Gives the file extension of an InnoDB single-table tablespace. */ static const char* ha_innobase_exts[] = { @@ -2308,13 +2288,11 @@ ha_innobase::open( UT_NOT_USED(mode); UT_NOT_USED(test_if_locked); - thd = current_thd; + thd = ha_thd(); normalize_table_name(norm_name, name); user_thd = NULL; - last_query_id = (ulong)-1; - if (!(share=get_share(name))) { DBUG_RETURN(1); @@ -2328,7 +2306,7 @@ ha_innobase::open( upd_and_key_val_buff_len = table->s->reclength + table->s->max_key_length + MAX_REF_PARTS * 3; - if (!(mysql_byte*) my_multi_malloc(MYF(MY_WME), + if (!(uchar*) my_multi_malloc(MYF(MY_WME), &upd_buff, upd_and_key_val_buff_len, &key_val_buff, upd_and_key_val_buff_len, NullS)) { @@ -2354,13 +2332,13 @@ ha_innobase::open( "how you can resolve the problem.\n", norm_name); free_share(share); - my_free((gptr) upd_buff, MYF(0)); + my_free(upd_buff, MYF(0)); my_errno = ENOENT; DBUG_RETURN(HA_ERR_NO_SUCH_TABLE); } - if (ib_table->ibd_file_missing && !thd->tablespace_op) { + if (ib_table->ibd_file_missing && !thd_tablespace_op(thd)) { ut_print_timestamp(stderr); sql_print_error("MySQL is trying to open a table handle but " "the .ibd file for\ntable %s does not exist.\n" @@ -2371,17 +2349,16 @@ ha_innobase::open( "how you can resolve the problem.\n", norm_name); free_share(share); - my_free((gptr) upd_buff, MYF(0)); + my_free(upd_buff, MYF(0)); my_errno = ENOENT; dict_table_decrement_handle_count(ib_table); DBUG_RETURN(HA_ERR_NO_SUCH_TABLE); } - innobase_prebuilt = row_create_prebuilt(ib_table); + prebuilt = row_create_prebuilt(ib_table); - ((row_prebuilt_t*)innobase_prebuilt)->mysql_row_len = - table->s->reclength; + prebuilt->mysql_row_len = table->s->reclength; /* Looks like MySQL-3.23 sometimes has primary key number != 0 */ @@ -2400,8 +2377,8 @@ ha_innobase::open( "dictionary, but not in MySQL!", name); } - ((row_prebuilt_t*)innobase_prebuilt) - ->clust_index_was_generated = FALSE; + prebuilt->clust_index_was_generated = FALSE; + /* MySQL allocates the buffer for ref. key_info->key_length includes space for all key columns + one byte for each column that may be NULL. 
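
The new ha_innobase::table_flags() above withholds HA_BINLOG_STMT_CAPABLE at READ COMMITTED and weaker isolation levels, since those levels cannot guarantee deterministic statement replay. A standalone model; the ISO_* ordering follows the server's enum, but the flag value here is illustrative:

    #include <cstdio>

    enum { ISO_READ_UNCOMMITTED, ISO_READ_COMMITTED,
           ISO_REPEATABLE_READ, ISO_SERIALIZABLE };

    static const unsigned HA_BINLOG_STMT_CAPABLE = 1u << 9; /* illustrative */

    /* Mirrors the new table_flags(): statement-based binlogging is
    only advertised at REPEATABLE READ and above. */
    static unsigned table_flags(unsigned int_table_flags, int tx_isolation)
    {
            if (tx_isolation <= ISO_READ_COMMITTED) {
                    return int_table_flags;
            }
            return int_table_flags | HA_BINLOG_STMT_CAPABLE;
    }

    int main()
    {
            unsigned base = 0x1f;  /* arbitrary stand-in for the member */
            printf("%#x\n", table_flags(base, ISO_READ_COMMITTED));
            printf("%#x\n", table_flags(base, ISO_REPEATABLE_READ));
            return 0;
    }
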
ref_length must be as exact as possible to @@ -2422,8 +2399,7 @@ ha_innobase::open( "of the table.", name); } - ((row_prebuilt_t*)innobase_prebuilt) - ->clust_index_was_generated = TRUE; + prebuilt->clust_index_was_generated = TRUE; ref_length = DATA_ROW_ID_LEN; @@ -2468,11 +2444,18 @@ ha_innobase::close(void) /*====================*/ /* out: 0 */ { + THD* thd; + DBUG_ENTER("ha_innobase::close"); - row_prebuilt_free((row_prebuilt_t*) innobase_prebuilt); + thd = current_thd; // avoid calling current_thd twice, it may be slow + if (thd != NULL) { + innobase_release_temporary_latches(ht, thd); + } + + row_prebuilt_free(prebuilt); - my_free((gptr) upd_buff, MYF(0)); + my_free(upd_buff, MYF(0)); free_share(share); /* Tell InnoDB server that there might be work for @@ -2495,7 +2478,7 @@ get_field_offset( TABLE* table, /* in: MySQL table object */ Field* field) /* in: MySQL field object */ { - return((uint) (field->ptr - (char*) table->record[0])); + return((uint) (field->ptr - table->record[0])); } /****************************************************************** @@ -2757,8 +2740,8 @@ inline uint innobase_read_from_2_little_endian( /*===============================*/ - /* out: value */ - const mysql_byte* buf) /* in: from where to read */ + /* out: value */ + const uchar* buf) /* in: from where to read */ { return (uint) ((ulint)(buf[0]) + 256 * ((ulint)(buf[1]))); } @@ -2774,7 +2757,7 @@ ha_innobase::store_key_val_for_row( char* buff, /* in/out: buffer for the key value (in MySQL format) */ uint buff_len,/* in: buffer length */ - const mysql_byte* record)/* in: row in MySQL format */ + const uchar* record)/* in: row in MySQL format */ { KEY* key_info = table->key_info + keynr; KEY_PART_INFO* key_part = key_info->key_part; @@ -2866,7 +2849,8 @@ ha_innobase::store_key_val_for_row( true_len = (ulint) cs->cset->well_formed_len(cs, (const char *) data, (const char *) data + len, - key_len / cs->mbmaxlen, + (uint) (key_len / + cs->mbmaxlen), &error); } @@ -2935,7 +2919,8 @@ ha_innobase::store_key_val_for_row( (const char *) blob_data, (const char *) blob_data + blob_len, - key_len / cs->mbmaxlen, + (uint) (key_len / + cs->mbmaxlen), &error); } @@ -2969,7 +2954,7 @@ ha_innobase::store_key_val_for_row( CHARSET_INFO* cs; ulint true_len; ulint key_len; - const mysql_byte* src_start; + const uchar* src_start; int error=0; enum_field_types real_type; @@ -3007,7 +2992,8 @@ ha_innobase::store_key_val_for_row( (const char *)src_start, (const char *)src_start + key_len, - key_len / cs->mbmaxlen, + (uint) (key_len / + cs->mbmaxlen), &error); } } @@ -3041,7 +3027,7 @@ static void build_template( /*===========*/ - row_prebuilt_t* prebuilt, /* in: prebuilt struct */ + row_prebuilt_t* prebuilt, /* in/out: prebuilt struct */ THD* thd, /* in: current user thread, used only if templ_type is ROW_MYSQL_REC_FIELDS */ @@ -3248,53 +3234,158 @@ skip_field: } /************************************************************************ +This special handling is really to overcome the limitations of MySQL's +binlogging. We need to eliminate the non-determinism that will arise in +INSERT ... SELECT type of statements, since MySQL binlog only stores the +min value of the autoinc interval. 
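
innobase_read_from_2_little_endian() above is small enough to restate as a runnable unit; the arithmetic is exactly the patch's, only the type names differ:

    #include <cstdio>

    static unsigned read_from_2_little_endian(const unsigned char* buf)
    {
            /* Same expression as the handler's helper: low byte
            first, high byte scaled by 256. */
            return (unsigned) buf[0] + 256u * (unsigned) buf[1];
    }

    int main()
    {
            unsigned char buf[2] = {0x34, 0x12};
            printf("0x%04x\n", read_from_2_little_endian(buf)); /* 0x1234 */
            return 0;
    }
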
Once that is fixed we can get rid of +the special lock handling.*/ + +ulong +ha_innobase::innobase_autoinc_lock(void) +/*====================================*/ + /* out: DB_SUCCESS if all OK else + error code */ +{ + ulint error = DB_SUCCESS; + + switch (innobase_autoinc_lock_mode) { + case AUTOINC_NO_LOCKING: + /* Acquire only the AUTOINC mutex. */ + dict_table_autoinc_lock(prebuilt->table); + break; + + case AUTOINC_NEW_STYLE_LOCKING: + /* For simple (single/multi) row INSERTs, we fallback to the + old style only if another transaction has already acquired + the AUTOINC lock on behalf of a LOAD FILE or INSERT ... SELECT + etc. type of statement. */ + if (thd_sql_command(user_thd) == SQLCOM_INSERT) { + dict_table_t* table = prebuilt->table; + + /* Acquire the AUTOINC mutex. */ + dict_table_autoinc_lock(table); + + /* We need to check that another transaction isn't + already holding the AUTOINC lock on the table. */ + if (table->n_waiting_or_granted_auto_inc_locks) { + /* Release the mutex to avoid deadlocks. */ + dict_table_autoinc_unlock(table); + } else { + break; + } + } + /* Fall through to old style locking. */ + + case AUTOINC_OLD_STYLE_LOCKING: + error = row_lock_table_autoinc_for_mysql(prebuilt); + + if (error == DB_SUCCESS) { + + /* Acquire the AUTOINC mutex. */ + dict_table_autoinc_lock(prebuilt->table); + } + break; + + default: + ut_error; + } + + return(ulong(error)); +} + +/************************************************************************ +Reset the autoinc value in the table.*/ + +ulong +ha_innobase::innobase_reset_autoinc( +/*================================*/ + /* out: DB_SUCCESS if all went well + else error code */ + ulonglong autoinc) /* in: value to store */ +{ + ulint error; + + error = innobase_autoinc_lock(); + + if (error == DB_SUCCESS) { + + dict_table_autoinc_initialize(prebuilt->table, autoinc); + + dict_table_autoinc_unlock(prebuilt->table); + } + + return(ulong(error)); +} + +/************************************************************************ +Store the autoinc value in the table. The autoinc value is only set if +it's greater than the existing autoinc value in the table.*/ + +ulong +ha_innobase::innobase_set_max_autoinc( +/*==================================*/ + /* out: DB_SUCCES if all went well + else error code */ + ulonglong auto_inc) /* in: value to store */ +{ + ulint error; + + error = innobase_autoinc_lock(); + + if (error == DB_SUCCESS) { + + dict_table_autoinc_update(prebuilt->table, auto_inc); + + dict_table_autoinc_unlock(prebuilt->table); + } + + return(ulong(error)); +} + +/************************************************************************ Stores a row in an InnoDB database, to the table specified in this handle. 
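
The three modes in innobase_autoinc_lock() trade concurrency against binlog safety: no locking takes only the AUTOINC mutex, new-style locking lets a plain INSERT do the same unless a bulk statement already holds the table-level lock, and old-style locking always takes the table lock until statement end. A standalone decision sketch; the enum values and field names are modeled on the patch, not taken from the server headers:

    #include <cstdio>

    enum autoinc_mode {
            AUTOINC_OLD_STYLE_LOCKING,
            AUTOINC_NEW_STYLE_LOCKING,
            AUTOINC_NO_LOCKING
    };

    /* is_simple_insert models thd_sql_command() == SQLCOM_INSERT;
    waiters models table->n_waiting_or_granted_auto_inc_locks. */
    static bool needs_table_lock(autoinc_mode mode, bool is_simple_insert,
                                 unsigned waiters)
    {
            switch (mode) {
            case AUTOINC_NO_LOCKING:
                    return false;  /* AUTOINC mutex only */
            case AUTOINC_NEW_STYLE_LOCKING:
                    if (is_simple_insert && waiters == 0) {
                            return false;
                    }
                    /* fall through to old-style locking */
            case AUTOINC_OLD_STYLE_LOCKING:
                    return true;   /* held to statement end */
            }
            return true;
    }

    int main()
    {
            printf("%d\n", needs_table_lock(AUTOINC_NEW_STYLE_LOCKING, true, 0));
            printf("%d\n", needs_table_lock(AUTOINC_NEW_STYLE_LOCKING, true, 1));
            printf("%d\n", needs_table_lock(AUTOINC_OLD_STYLE_LOCKING, true, 0));
            return 0;
    }
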
*/ int ha_innobase::write_row( /*===================*/ - /* out: error code */ - mysql_byte* record) /* in: a row in MySQL format */ + /* out: error code */ + uchar* record) /* in: a row in MySQL format */ { - row_prebuilt_t* prebuilt = (row_prebuilt_t*)innobase_prebuilt; - int error; - longlong auto_inc; - longlong dummy; + int error = 0; ibool auto_inc_used= FALSE; + ulint sql_command; + trx_t* trx = thd_to_trx(user_thd); DBUG_ENTER("ha_innobase::write_row"); - if (prebuilt->trx != - (trx_t*) current_thd->ha_data[ht->slot]) { + if (prebuilt->trx != trx) { sql_print_error("The transaction object for the table handle is at " "%p, but for the current thread it is at %p", - prebuilt->trx, - (trx_t*) current_thd->ha_data[ht->slot]); + prebuilt->trx, trx); fputs("InnoDB: Dump of 200 bytes around prebuilt: ", stderr); ut_print_buf(stderr, ((const byte*)prebuilt) - 100, 200); fputs("\n" - "InnoDB: Dump of 200 bytes around transaction.all: ", + "InnoDB: Dump of 200 bytes around ha_data: ", stderr); - ut_print_buf(stderr, - ((byte*)(&(current_thd->ha_data[ht->slot]))) - 100, - 200); + ut_print_buf(stderr, ((const byte*) trx) - 100, 200); putc('\n', stderr); ut_error; } - statistic_increment(current_thd->status_var.ha_write_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_write_count); if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT) table->timestamp_field->set_time(); - if ((user_thd->lex->sql_command == SQLCOM_ALTER_TABLE - || user_thd->lex->sql_command == SQLCOM_OPTIMIZE - || user_thd->lex->sql_command == SQLCOM_CREATE_INDEX - || user_thd->lex->sql_command == SQLCOM_DROP_INDEX) - && num_write_row >= 10000) { + sql_command = thd_sql_command(user_thd); + + if ((sql_command == SQLCOM_ALTER_TABLE + || sql_command == SQLCOM_OPTIMIZE + || sql_command == SQLCOM_CREATE_INDEX + || sql_command == SQLCOM_DROP_INDEX) + && num_write_row >= 10000) { /* ALTER TABLE is COMMITted at every 10000 copied rows. The IX table lock for the original table has to be re-issued. As this method will be called on a temporary table where the @@ -3360,69 +3451,20 @@ no_commit: num_write_row++; - if (last_query_id != user_thd->query_id) { - prebuilt->sql_stat_start = TRUE; - last_query_id = user_thd->query_id; - - innobase_release_stat_resources(prebuilt->trx); - } - + /* This is the case where the table has an auto-increment column */ if (table->next_number_field && record == table->record[0]) { - /* This is the case where the table has an - auto-increment column */ - - /* Initialize the auto-inc counter if it has not been - initialized yet */ - - if (0 == dict_table_autoinc_peek(prebuilt->table)) { - - /* This call initializes the counter */ - error = innobase_read_and_init_auto_inc(&dummy); - - if (error) { - /* Deadlock or lock wait timeout */ - - goto func_exit; - } - - /* We have to set sql_stat_start to TRUE because - the above call probably has called a select, and - has reset that flag; row_insert_for_mysql has to - know to set the IX intention lock on the table, - something it only does at the start of each - statement */ - - prebuilt->sql_stat_start = TRUE; - } - - /* We have to use the transactional lock mechanism on the - auto-inc counter of the table to ensure that replication and - roll-forward of the binlog exactly imitates also the given - auto-inc values. The lock is released at each SQL statement's - end. This lock also prevents a race where two threads would - call ::get_auto_increment() simultaneously. 
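
write_row() above commits ALTER TABLE-style copies every 10000 rows so a long copy does not hold its locks and undo for the whole operation. A standalone sketch of the batching pattern; the counter reset is an assumption, since the hunk cuts off before it:

    #include <cstdio>

    int main()
    {
            unsigned long num_write_row = 0;
            bool is_bulk_ddl = true;  /* SQLCOM_ALTER_TABLE etc. */

            for (unsigned long row = 0; row < 25000; row++) {
                    if (is_bulk_ddl && num_write_row >= 10000) {
                            printf("intermediate COMMIT at row %lu\n", row);
                            num_write_row = 0;  /* assumed reset */
                    }
                    num_write_row++;
            }
            return 0;
    }
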
*/ - error = row_lock_table_autoinc_for_mysql(prebuilt); - - if (error != DB_SUCCESS) { - /* Deadlock or lock wait timeout */ - - error = convert_error_code_to_mysql(error, user_thd); + if ((error = update_auto_increment())) { goto func_exit; } - /* We must use the handler code to update the auto-increment - value to be sure that we increment it correctly. */ - - if ((error= update_auto_increment())) - goto func_exit; - auto_inc_used = 1; - + auto_inc_used = TRUE; } if (prebuilt->mysql_template == NULL - || prebuilt->template_type != ROW_MYSQL_WHOLE_ROW) { + || prebuilt->template_type != ROW_MYSQL_WHOLE_ROW) { + /* Build the template used in converting quickly between the two database formats */ @@ -3433,34 +3475,64 @@ no_commit: error = row_insert_for_mysql((byte*) record, prebuilt); - if (error == DB_SUCCESS && auto_inc_used) { + /* Handle duplicate key errors */ + if (auto_inc_used) { + ulonglong auto_inc; - /* Fetch the value that was set in the autoincrement field */ + /* Note the number of rows processed for this statement, used + by get_auto_increment() to determine the number of AUTO-INC + values to reserve. This is only useful for a mult-value INSERT + and is a statement level counter.*/ + if (trx->n_autoinc_rows > 0) { + --trx->n_autoinc_rows; + } + /* Get the value that MySQL attempted to store in the table.*/ auto_inc = table->next_number_field->val_int(); - if (auto_inc != 0) { - /* This call will update the counter according to the - value that was inserted in the table */ + switch (error) { + case DB_DUPLICATE_KEY: - dict_table_autoinc_update(prebuilt->table, auto_inc); - } - } + /* A REPLACE command and LOAD DATA INFILE REPLACE + handle a duplicate key error themselves, but we + must update the autoinc counter if we are performing + those statements. */ - /* A REPLACE command and LOAD DATA INFILE REPLACE handle a duplicate - key error themselves, and we must update the autoinc counter if we are - performing those statements. */ + switch (sql_command) { + case SQLCOM_LOAD: + if ((trx->duplicates + & (TRX_DUP_IGNORE | TRX_DUP_REPLACE))) { - if (error == DB_DUPLICATE_KEY && auto_inc_used - && (user_thd->lex->sql_command == SQLCOM_REPLACE - || user_thd->lex->sql_command == SQLCOM_REPLACE_SELECT - || (user_thd->lex->sql_command == SQLCOM_LOAD - && user_thd->lex->duplicates == DUP_REPLACE))) { + goto set_max_autoinc; + } + break; - auto_inc = table->next_number_field->val_int(); + case SQLCOM_REPLACE: + case SQLCOM_INSERT_SELECT: + case SQLCOM_REPLACE_SELECT: + goto set_max_autoinc; + break; - if (auto_inc != 0) { - dict_table_autoinc_update(prebuilt->table, auto_inc); + default: + break; + } + + break; + + case DB_SUCCESS: + /* If the actual value inserted is greater than + the upper limit of the interval, then we try and + update the table upper limit. 
Note: last_value + will be 0 if get_auto_increment() was not called.*/ + + if (auto_inc > prebuilt->last_value) { +set_max_autoinc: + ut_a(prebuilt->table->autoinc_increment > 0); + auto_inc += prebuilt->table->autoinc_increment; + + innobase_set_max_autoinc(auto_inc); + } + break; } } @@ -3468,8 +3540,6 @@ no_commit: error = convert_error_code_to_mysql(error, user_thd); - /* Tell InnoDB server that there might be work for - utility threads: */ func_exit: innobase_active_small(); @@ -3485,16 +3555,16 @@ calc_row_difference( /*================*/ /* out: error number or 0 */ upd_t* uvect, /* in/out: update vector */ - mysql_byte* old_row, /* in: old row in MySQL format */ - mysql_byte* new_row, /* in: new row in MySQL format */ + uchar* old_row, /* in: old row in MySQL format */ + uchar* new_row, /* in: new row in MySQL format */ struct st_table* table, /* in: table in MySQL data dictionary */ - mysql_byte* upd_buff, /* in: buffer to use */ + uchar* upd_buff, /* in: buffer to use */ ulint buff_len, /* in: buffer length */ row_prebuilt_t* prebuilt, /* in: InnoDB prebuilt struct */ THD* thd) /* in: user thread */ { - mysql_byte* original_upd_buff = upd_buff; + uchar* original_upd_buff = upd_buff; Field* field; enum_field_types field_mysql_type; uint n_fields; @@ -3521,13 +3591,6 @@ calc_row_difference( for (i = 0; i < n_fields; i++) { field = table->field[i]; - /* if (thd->query_id != field->query_id) { */ - /* TODO: check that these fields cannot have - changed! */ - - /* goto skip_field; - }*/ - o_ptr = (byte*) old_row + get_field_offset(table, field); n_ptr = (byte*) new_row + get_field_offset(table, field); @@ -3645,28 +3708,20 @@ int ha_innobase::update_row( /*====================*/ /* out: error number or 0 */ - const mysql_byte* old_row,/* in: old row in MySQL format */ - mysql_byte* new_row)/* in: new row in MySQL format */ + const uchar* old_row, /* in: old row in MySQL format */ + uchar* new_row) /* in: new row in MySQL format */ { - row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt; upd_t* uvect; int error = 0; + trx_t* trx = thd_to_trx(user_thd); DBUG_ENTER("ha_innobase::update_row"); - ut_a(prebuilt->trx == - (trx_t*) current_thd->ha_data[ht->slot]); + ut_a(prebuilt->trx == trx); if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE) table->timestamp_field->set_time(); - if (last_query_id != user_thd->query_id) { - prebuilt->sql_stat_start = TRUE; - last_query_id = user_thd->query_id; - - innobase_release_stat_resources(prebuilt->trx); - } - if (prebuilt->upd_node) { uvect = prebuilt->upd_node->update; } else { @@ -3676,7 +3731,7 @@ ha_innobase::update_row( /* Build an update vector from the modified fields in the rows (uses upd_buff of the handle) */ - calc_row_difference(uvect, (mysql_byte*) old_row, new_row, table, + calc_row_difference(uvect, (uchar*) old_row, new_row, table, upd_buff, (ulint)upd_and_key_val_buff_len, prebuilt, user_thd); @@ -3685,11 +3740,37 @@ ha_innobase::update_row( assert(prebuilt->template_type == ROW_MYSQL_WHOLE_ROW); - innodb_srv_conc_enter_innodb(prebuilt->trx); + innodb_srv_conc_enter_innodb(trx); error = row_update_for_mysql((byte*) old_row, prebuilt); - innodb_srv_conc_exit_innodb(prebuilt->trx); + /* We need to do some special AUTOINC handling for the following case: + + INSERT INTO t (c1,c2) VALUES(x,y) ON DUPLICATE KEY UPDATE ... 
+ + We need to use the AUTOINC counter that was actually used by + MySQL in the UPDATE statement, which can be different from the + value used in the INSERT statement.*/ + + if (error == DB_SUCCESS + && table->next_number_field + && new_row == table->record[0] + && thd_sql_command(user_thd) == SQLCOM_INSERT + && (trx->duplicates & (TRX_DUP_IGNORE | TRX_DUP_REPLACE)) + == TRX_DUP_IGNORE) { + + longlong auto_inc; + + auto_inc = table->next_number_field->val_int(); + + if (auto_inc != 0) { + auto_inc += prebuilt->table->autoinc_increment; + + innobase_set_max_autoinc(auto_inc); + } + } + + innodb_srv_conc_exit_innodb(trx); error = convert_error_code_to_mysql(error, user_thd); @@ -3707,22 +3788,38 @@ Deletes a row given as the parameter. */ int ha_innobase::delete_row( /*====================*/ - /* out: error number or 0 */ - const mysql_byte* record) /* in: a row in MySQL format */ + /* out: error number or 0 */ + const uchar* record) /* in: a row in MySQL format */ { - row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt; int error = 0; + trx_t* trx = thd_to_trx(user_thd); DBUG_ENTER("ha_innobase::delete_row"); - ut_a(prebuilt->trx == - (trx_t*) current_thd->ha_data[ht->slot]); + ut_a(prebuilt->trx == trx); + + /* Only if the table has an AUTOINC column */ + if (table->found_next_number_field && record == table->record[0]) { + ulonglong dummy = 0; + + /* First check whether the AUTOINC sub-system has been + initialized using the AUTOINC mutex. If not then we + do it the "proper" way, by acquiring the heavier locks. */ + dict_table_autoinc_lock(prebuilt->table); + + if (!prebuilt->table->autoinc_inited) { + dict_table_autoinc_unlock(prebuilt->table); - if (last_query_id != user_thd->query_id) { - prebuilt->sql_stat_start = TRUE; - last_query_id = user_thd->query_id; + error = innobase_get_auto_increment(&dummy); - innobase_release_stat_resources(prebuilt->trx); + if (error == DB_SUCCESS) { + dict_table_autoinc_unlock(prebuilt->table); + } else { + goto error_exit; + } + } else { + dict_table_autoinc_unlock(prebuilt->table); + } } if (!prebuilt->upd_node) { @@ -3733,12 +3830,13 @@ ha_innobase::delete_row( prebuilt->upd_node->is_delete = TRUE; - innodb_srv_conc_enter_innodb(prebuilt->trx); + innodb_srv_conc_enter_innodb(trx); error = row_update_for_mysql((byte*) record, prebuilt); - innodb_srv_conc_exit_innodb(prebuilt->trx); + innodb_srv_conc_exit_innodb(trx); +error_exit: error = convert_error_code_to_mysql(error, user_thd); /* Tell the InnoDB server that there might be work for @@ -3758,19 +3856,8 @@ void ha_innobase::unlock_row(void) /*=========================*/ { - row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt; - DBUG_ENTER("ha_innobase::unlock_row"); - if (UNIV_UNLIKELY(last_query_id != user_thd->query_id)) { - ut_print_timestamp(stderr); - sql_print_error("last_query_id is %lu != user_thd_query_id is " - "%lu", (ulong) last_query_id, - (ulong) user_thd->query_id); - mem_analyze_corruption((byte *) prebuilt->trx); - ut_error; - } - /* Consistent read does not take any locks, thus there is nothing to unlock. 
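
Both write_row() and update_row() above push the counter forward with `value + autoinc_increment` and rely on innobase_set_max_autoinc() never moving it backwards. A standalone model of that forward-only update; plain variables stand in for the dict_table_t fields and the AUTOINC mutex:

    #include <cstdio>

    struct table_t {
            unsigned long long autoinc;
            unsigned long long increment;
    };

    /* dict_table_autoinc_update() semantics: only ever move forward. */
    static void set_max_autoinc(table_t& t, unsigned long long candidate)
    {
            if (candidate > t.autoinc) {
                    t.autoinc = candidate;
            }
    }

    int main()
    {
            table_t t = {100, 5};

            /* Row explicitly inserted with value 120: counter -> 125. */
            set_max_autoinc(t, 120 + t.increment);
            printf("autoinc = %llu\n", t.autoinc);

            /* A lower value must not move the counter back. */
            set_max_autoinc(t, 50 + t.increment);
            printf("autoinc = %llu\n", t.autoinc);  /* still 125 */
            return 0;
    }
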
*/ @@ -3801,8 +3888,6 @@ bool ha_innobase::was_semi_consistent_read(void) /*=======================================*/ { - row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt; - return(prebuilt->row_read_type == ROW_READ_DID_SEMI_CONSISTENT); } @@ -3811,17 +3896,15 @@ void ha_innobase::try_semi_consistent_read(bool yes) /*===========================================*/ { - row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt; - - ut_a(prebuilt->trx == - (trx_t*) current_thd->ha_data[ht->slot]); + ut_a(prebuilt->trx == thd_to_trx(ha_thd())); /* Row read type is set to semi consistent read if this was requested by the MySQL and either innodb_locks_unsafe_for_binlog option is used or this session is using READ COMMITTED isolation level. */ - if (yes && (srv_locks_unsafe_for_binlog + if (yes + && (srv_locks_unsafe_for_binlog || prebuilt->trx->isolation_level == TRX_ISO_READ_COMMITTED)) { prebuilt->row_read_type = ROW_READ_TRY_SEMI_CONSISTENT; } else { @@ -3957,9 +4040,9 @@ ha_innobase::index_read( /*====================*/ /* out: 0, HA_ERR_KEY_NOT_FOUND, or error number */ - mysql_byte* buf, /* in/out: buffer for the returned + uchar* buf, /* in/out: buffer for the returned row */ - const mysql_byte* key_ptr,/* in: key value; if this is NULL + const uchar* key_ptr, /* in: key value; if this is NULL we position the cursor at the start or end of index; this can also contain an InnoDB row id, in @@ -3971,7 +4054,6 @@ ha_innobase::index_read( uint key_len,/* in: key value length */ enum ha_rkey_function find_flag)/* in: search flags from my_base.h */ { - row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt; ulint mode; dict_index_t* index; ulint match_mode = 0; @@ -3980,18 +4062,9 @@ ha_innobase::index_read( DBUG_ENTER("index_read"); - ut_a(prebuilt->trx == - (trx_t*) current_thd->ha_data[ht->slot]); - - statistic_increment(current_thd->status_var.ha_read_key_count, - &LOCK_status); + ut_a(prebuilt->trx == thd_to_trx(user_thd)); - if (last_query_id != user_thd->query_id) { - prebuilt->sql_stat_start = TRUE; - last_query_id = user_thd->query_id; - - innobase_release_stat_resources(prebuilt->trx); - } + ha_statistic_increment(&SSV::ha_read_key_count); index = prebuilt->index; @@ -4066,57 +4139,78 @@ row with the current key value or prefix. */ int ha_innobase::index_read_last( /*=========================*/ - /* out: 0, HA_ERR_KEY_NOT_FOUND, or an - error code */ - mysql_byte* buf, /* out: fetched row */ - const mysql_byte* key_ptr, /* in: key value, or a prefix of a full - key value */ - uint key_len) /* in: length of the key val or prefix - in bytes */ + /* out: 0, HA_ERR_KEY_NOT_FOUND, or an + error code */ + uchar* buf, /* out: fetched row */ + const uchar* key_ptr,/* in: key value, or a prefix of a full + key value */ + uint key_len)/* in: length of the key val or prefix + in bytes */ { return(index_read(buf, key_ptr, key_len, HA_READ_PREFIX_LAST)); } /************************************************************************ -Changes the active index of a handle. */ +Get the index for a handle. Does not change active index.*/ -int -ha_innobase::change_active_index( -/*=============================*/ - /* out: 0 or error code */ - uint keynr) /* in: use this index; MAX_KEY means always clustered - index, even if it was internally generated by - InnoDB */ +dict_index_t* +ha_innobase::innobase_get_index( +/*============================*/ + /* out: NULL or index instance. 
*/ + uint keynr) /* in: use this index; MAX_KEY means always + clustered index, even if it was internally + generated by InnoDB */ { - row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt; - KEY* key=0; - statistic_increment(current_thd->status_var.ha_read_key_count, - &LOCK_status); - DBUG_ENTER("change_active_index"); + KEY* key = 0; + dict_index_t* index = 0; - ut_ad(user_thd == current_thd); - ut_a(prebuilt->trx == - (trx_t*) current_thd->ha_data[ht->slot]); + DBUG_ENTER("innobase_get_index"); + ha_statistic_increment(&SSV::ha_read_key_count); - active_index = keynr; + ut_ad(user_thd == ha_thd()); + ut_a(prebuilt->trx == thd_to_trx(user_thd)); if (keynr != MAX_KEY && table->s->keys > 0) { - key = table->key_info + active_index; + key = table->key_info + keynr; - prebuilt->index = dict_table_get_index_noninline( + index = dict_table_get_index_noninline( prebuilt->table, key->name); } else { - prebuilt->index = dict_table_get_first_index_noninline( - prebuilt->table); + index = dict_table_get_first_index_noninline(prebuilt->table); } - if (!prebuilt->index) { + if (!index) { sql_print_error( "Innodb could not find key n:o %u with name %s " "from dict cache for table %s", keynr, key ? key->name : "NULL", prebuilt->table->name); + } + + DBUG_RETURN(index); +} +/************************************************************************ +Changes the active index of a handle. */ + +int +ha_innobase::change_active_index( +/*=============================*/ + /* out: 0 or error code */ + uint keynr) /* in: use this index; MAX_KEY means always clustered + index, even if it was internally generated by + InnoDB */ +{ + DBUG_ENTER("change_active_index"); + + ut_ad(user_thd == ha_thd()); + ut_a(prebuilt->trx == thd_to_trx(user_thd)); + + active_index = keynr; + + prebuilt->index = innobase_get_index(keynr); + + if (!prebuilt->index) { DBUG_RETURN(1); } @@ -4147,10 +4241,10 @@ int ha_innobase::index_read_idx( /*========================*/ /* out: error number or 0 */ - mysql_byte* buf, /* in/out: buffer for the returned + uchar* buf, /* in/out: buffer for the returned row */ uint keynr, /* in: use this index */ - const mysql_byte* key, /* in: key value; if this is NULL + const uchar* key, /* in: key value; if this is NULL we position the cursor at the start or end of index */ uint key_len, /* in: key value length */ @@ -4173,20 +4267,18 @@ ha_innobase::general_fetch( /*=======================*/ /* out: 0, HA_ERR_END_OF_FILE, or error number */ - mysql_byte* buf, /* in/out: buffer for next row in MySQL + uchar* buf, /* in/out: buffer for next row in MySQL format */ uint direction, /* in: ROW_SEL_NEXT or ROW_SEL_PREV */ uint match_mode) /* in: 0, ROW_SEL_EXACT, or ROW_SEL_EXACT_PREFIX */ { - row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt; ulint ret; int error = 0; DBUG_ENTER("general_fetch"); - ut_a(prebuilt->trx == - (trx_t*) current_thd->ha_data[ht->slot]); + ut_a(prebuilt->trx == thd_to_trx(user_thd)); innodb_srv_conc_enter_innodb(prebuilt->trx); @@ -4222,11 +4314,10 @@ ha_innobase::index_next( /*====================*/ /* out: 0, HA_ERR_END_OF_FILE, or error number */ - mysql_byte* buf) /* in/out: buffer for next row in MySQL + uchar* buf) /* in/out: buffer for next row in MySQL format */ { - statistic_increment(current_thd->status_var.ha_read_next_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_next_count); return(general_fetch(buf, ROW_SEL_NEXT, 0)); } @@ -4239,12 +4330,11 @@ ha_innobase::index_next_same( /*=========================*/ /* out: 0, 
HA_ERR_END_OF_FILE, or error number */ - mysql_byte* buf, /* in/out: buffer for the row */ - const mysql_byte* key, /* in: key value */ + uchar* buf, /* in/out: buffer for the row */ + const uchar* key, /* in: key value */ uint keylen) /* in: key value length */ { - statistic_increment(current_thd->status_var.ha_read_next_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_next_count); return(general_fetch(buf, ROW_SEL_NEXT, last_match_mode)); } @@ -4256,13 +4346,10 @@ positioned using index_read. */ int ha_innobase::index_prev( /*====================*/ - /* out: 0, HA_ERR_END_OF_FILE, or error - number */ - mysql_byte* buf) /* in/out: buffer for previous row in MySQL - format */ + /* out: 0, HA_ERR_END_OF_FILE, or error number */ + uchar* buf) /* in/out: buffer for previous row in MySQL format */ { - statistic_increment(current_thd->status_var.ha_read_prev_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_prev_count); return(general_fetch(buf, ROW_SEL_PREV, 0)); } @@ -4274,15 +4361,13 @@ corresponding row to buf. */ int ha_innobase::index_first( /*=====================*/ - /* out: 0, HA_ERR_END_OF_FILE, - or error code */ - mysql_byte* buf) /* in/out: buffer for the row */ + /* out: 0, HA_ERR_END_OF_FILE, or error code */ + uchar* buf) /* in/out: buffer for the row */ { int error; DBUG_ENTER("index_first"); - statistic_increment(current_thd->status_var.ha_read_first_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_first_count); error = index_read(buf, NULL, 0, HA_READ_AFTER_KEY); @@ -4302,14 +4387,13 @@ corresponding row to buf. */ int ha_innobase::index_last( /*====================*/ - /* out: 0, HA_ERR_END_OF_FILE, or error code */ - mysql_byte* buf) /* in/out: buffer for the row */ + /* out: 0, HA_ERR_END_OF_FILE, or error code */ + uchar* buf) /* in/out: buffer for the row */ { int error; DBUG_ENTER("index_last"); - statistic_increment(current_thd->status_var.ha_read_last_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_last_count); error = index_read(buf, NULL, 0, HA_READ_BEFORE_KEY); @@ -4333,8 +4417,6 @@ ha_innobase::rnd_init( { int err; - row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt; - /* Store the active index value so that we can restore the original value after a scan */ @@ -4375,14 +4457,13 @@ int ha_innobase::rnd_next( /*==================*/ /* out: 0, HA_ERR_END_OF_FILE, or error number */ - mysql_byte* buf)/* in/out: returns the row in this buffer, + uchar* buf) /* in/out: returns the row in this buffer, in MySQL format */ { int error; DBUG_ENTER("rnd_next"); - statistic_increment(current_thd->status_var.ha_read_rnd_next_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_rnd_next_count); if (start_of_scan) { error = index_first(buf); @@ -4403,26 +4484,21 @@ Fetches a row from the table based on a row reference. 
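
index_first() and index_last() above are thin wrappers: an index_read() with a NULL key positions the cursor at one end of the index, chosen by the search flag. A toy restatement of that dispatch; return values and bodies are illustrative:

    #include <cstdio>

    enum ha_rkey_function { HA_READ_AFTER_KEY, HA_READ_BEFORE_KEY };

    static const char* index_read(const unsigned char* key,
                                  ha_rkey_function flag)
    {
            if (key == NULL) {
                    /* NULL key: position at start or end of index. */
                    return (flag == HA_READ_AFTER_KEY)
                            ? "first row" : "last row";
            }
            return "keyed lookup (not modeled)";
    }

    static const char* index_first()
    {
            return index_read(NULL, HA_READ_AFTER_KEY);
    }

    static const char* index_last()
    {
            return index_read(NULL, HA_READ_BEFORE_KEY);
    }

    int main()
    {
            printf("index_first -> %s\n", index_first());
            printf("index_last  -> %s\n", index_last());
            return 0;
    }
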
*/ int ha_innobase::rnd_pos( /*=================*/ - /* out: 0, HA_ERR_KEY_NOT_FOUND, - or error code */ - mysql_byte* buf, /* in/out: buffer for the row */ - mysql_byte* pos) /* in: primary key value of the row in the - MySQL format, or the row id if the clustered - index was internally generated by InnoDB; - the length of data in pos has to be - ref_length */ -{ - row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt; + /* out: 0, HA_ERR_KEY_NOT_FOUND, or error code */ + uchar* buf, /* in/out: buffer for the row */ + uchar* pos) /* in: primary key value of the row in the + MySQL format, or the row id if the clustered + index was internally generated by InnoDB; the + length of data in pos has to be ref_length */ +{ int error; uint keynr = active_index; DBUG_ENTER("rnd_pos"); - DBUG_DUMP("key", (char*) pos, ref_length); + DBUG_DUMP("key", pos, ref_length); - statistic_increment(current_thd->status_var.ha_read_rnd_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_rnd_count); - ut_a(prebuilt->trx == - (trx_t*) current_thd->ha_data[ht->slot]); + ut_a(prebuilt->trx == thd_to_trx(ha_thd())); if (prebuilt->clust_index_was_generated) { /* No primary key was defined for the table and we @@ -4466,13 +4542,11 @@ was positioned the last time. */ void ha_innobase::position( /*==================*/ - const mysql_byte* record) /* in: row in MySQL format */ + const uchar* record) /* in: row in MySQL format */ { - row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt; uint len; - ut_a(prebuilt->trx == - (trx_t*) current_thd->ha_data[ht->slot]); + ut_a(prebuilt->trx == thd_to_trx(ha_thd())); if (prebuilt->clust_index_was_generated) { /* No primary key was defined for the table and we @@ -4498,6 +4572,24 @@ ha_innobase::position( } /********************************************************************* +If it's a DB_TOO_BIG_RECORD error then set a suitable message to +return to the client.*/ +inline +void +innodb_check_for_record_too_big_error( +/*==================================*/ + ulint comp, /* in: ROW_FORMAT: nonzero=COMPACT, 0=REDUNDANT */ + int error) /* in: error code to check */ +{ + if (error == (int)DB_TOO_BIG_RECORD) { + ulint max_row_size + = page_get_free_space_of_empty_noninline(comp) / 2; + + my_error(ER_TOO_BIG_ROWSIZE, MYF(0), max_row_size); + } +} + +/********************************************************************* Creates a table definition to an InnoDB database. */ static int @@ -4592,7 +4684,7 @@ create_table_def( } } - dict_mem_table_add_col(table, + dict_mem_table_add_col(table, table->heap, (char*) field->field_name, col_type, dtype_form_prtype( @@ -4605,6 +4697,8 @@ create_table_def( error = row_create_table_for_mysql(table, trx); + innodb_check_for_record_too_big_error(flags & DICT_TF_COMPACT, error); + error = convert_error_code_to_mysql(error, NULL); DBUG_RETURN(error); @@ -4727,9 +4821,12 @@ create_index( sure we don't create too long indexes. 
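
innodb_check_for_record_too_big_error() above reports half the free space of an empty page as the maximum row size, reflecting the rule that every page must be able to hold at least two records. A standalone version of the arithmetic; the 16 KB page and the overhead figure are illustrative stand-ins for page_get_free_space_of_empty():

    #include <cstdio>

    static unsigned long max_row_size(unsigned long page_size,
                                      unsigned long page_overhead)
    {
            unsigned long free_space = page_size - page_overhead;
            return free_space / 2;  /* a page must fit >= 2 records */
    }

    int main()
    {
            printf("max row size ~ %lu bytes\n", max_row_size(16384, 128));
            return 0;
    }
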
*/ error = row_create_index_for_mysql(index, trx, field_lengths); + innodb_check_for_record_too_big_error(form->s->row_type + != ROW_TYPE_REDUNDANT, error); + error = convert_error_code_to_mysql(error, NULL); - my_free((gptr) field_lengths, MYF(0)); + my_free(field_lengths, MYF(0)); DBUG_RETURN(error); } @@ -4742,6 +4839,8 @@ int create_clustered_index_when_no_primary( /*===================================*/ trx_t* trx, /* in: InnoDB transaction handle */ + ulint comp, /* in: ROW_FORMAT: + nonzero=COMPACT, 0=REDUNDANT */ const char* table_name) /* in: table name */ { dict_index_t* index; @@ -4750,16 +4849,32 @@ create_clustered_index_when_no_primary( /* We pass 0 as the space id, and determine at a lower level the space id where to store the table */ - index = dict_mem_index_create((char*) table_name, - (char*) "GEN_CLUST_INDEX", 0, DICT_CLUSTERED, 0); + index = dict_mem_index_create(table_name, "GEN_CLUST_INDEX", + 0, DICT_CLUSTERED, 0); error = row_create_index_for_mysql(index, trx, NULL); + innodb_check_for_record_too_big_error(comp, error); + error = convert_error_code_to_mysql(error, NULL); return(error); } /********************************************************************* +Update create_info. Used in SHOW CREATE TABLE et al. */ + +void +ha_innobase::update_create_info( +/*============================*/ + HA_CREATE_INFO* create_info) /* in/out: create info */ +{ + if (!(create_info->used_fields & HA_CREATE_USED_AUTO)) { + ha_innobase::info(HA_STATUS_AUTO); + create_info->auto_increment_value = stats.auto_increment_value; + } +} + +/********************************************************************* Creates a new table to an InnoDB database. */ int @@ -4781,7 +4896,7 @@ ha_innobase::create( uint i; char name2[FN_REFLEN]; char norm_name[FN_REFLEN]; - THD *thd= current_thd; + THD* thd = ha_thd(); ib_longlong auto_inc_value; ulint flags; @@ -4799,7 +4914,7 @@ ha_innobase::create( /* Get the transaction associated with the current thd, or create one if not yet created */ - parent_trx = check_trx_exists(ht, thd); + parent_trx = check_trx_exists(thd); /* In case MySQL calls this in the middle of a SELECT query, release possible adaptive hash latch to avoid deadlocks of threads */ @@ -4809,13 +4924,13 @@ ha_innobase::create( trx = trx_allocate_for_mysql(); trx->mysql_thd = thd; - trx->mysql_query_str = &((*thd).query); + trx->mysql_query_str = thd_query(thd); - if (thd->options & OPTION_NO_FOREIGN_KEY_CHECKS) { + if (thd_test_options(thd, OPTION_NO_FOREIGN_KEY_CHECKS)) { trx->check_foreigns = FALSE; } - if (thd->options & OPTION_RELAXED_UNIQUE_CHECKS) { + if (thd_test_options(thd, OPTION_RELAXED_UNIQUE_CHECKS)) { trx->check_unique_secondary = FALSE; } @@ -4869,8 +4984,9 @@ ha_innobase::create( order the rows by their row id which is internally generated by InnoDB */ - error = create_clustered_index_when_no_primary(trx, - norm_name); + error = create_clustered_index_when_no_primary( + trx, form->s->row_type != ROW_TYPE_REDUNDANT, + norm_name); if (error) { goto cleanup; } @@ -4895,9 +5011,9 @@ ha_innobase::create( } } - if (thd->query != NULL) { + if (*trx->mysql_query_str) { error = row_table_add_foreign_constraints(trx, - thd->query, norm_name, + *trx->mysql_query_str, norm_name, create_info->options & HA_LEX_CREATE_TMP_TABLE); error = convert_error_code_to_mysql(error, NULL); @@ -4932,7 +5048,10 @@ ha_innobase::create( maximum value in the column. 
*/ auto_inc_value = create_info->auto_increment_value; + + dict_table_autoinc_lock(innobase_table); dict_table_autoinc_initialize(innobase_table, auto_inc_value); + dict_table_autoinc_unlock(innobase_table); } /* Tell the InnoDB server that there might be work for @@ -4963,16 +5082,15 @@ ha_innobase::discard_or_import_tablespace( /* out: 0 == success, -1 == error */ my_bool discard) /* in: TRUE if discard, else import */ { - row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt; dict_table_t* dict_table; trx_t* trx; int err; DBUG_ENTER("ha_innobase::discard_or_import_tablespace"); - ut_a(prebuilt->trx && prebuilt->trx->magic_n == TRX_MAGIC_N); - ut_a(prebuilt->trx == - (trx_t*) current_thd->ha_data[ht->slot]); + ut_a(prebuilt->trx); + ut_a(prebuilt->trx->magic_n == TRX_MAGIC_N); + ut_a(prebuilt->trx == thd_to_trx(ha_thd())); dict_table = prebuilt->table; trx = prebuilt->trx; @@ -4996,24 +5114,22 @@ ha_innobase::delete_all_rows(void) /*==============================*/ /* out: error number */ { - row_prebuilt_t* prebuilt = (row_prebuilt_t*)innobase_prebuilt; int error; - THD* thd = current_thd; DBUG_ENTER("ha_innobase::delete_all_rows"); - if (thd->lex->sql_command != SQLCOM_TRUNCATE) { + /* Get the transaction associated with the current thd, or create one + if not yet created, and update prebuilt->trx */ + + update_thd(ha_thd()); + + if (thd_sql_command(user_thd) != SQLCOM_TRUNCATE) { fallback: /* We only handle TRUNCATE TABLE t as a special case. DELETE FROM t will have to use ha_innobase::delete_row(). */ DBUG_RETURN(my_errno=HA_ERR_WRONG_COMMAND); } - /* Get the transaction associated with the current thd, or create one - if not yet created, and update prebuilt->trx */ - - update_thd(thd); - /* Truncate the table in InnoDB */ error = row_truncate_table_for_mysql(prebuilt->table, prebuilt->trx); @@ -5044,7 +5160,7 @@ ha_innobase::delete_table( int error; trx_t* parent_trx; trx_t* trx; - THD *thd= current_thd; + THD *thd = ha_thd(); char norm_name[1000]; DBUG_ENTER("ha_innobase::delete_table"); @@ -5052,7 +5168,7 @@ ha_innobase::delete_table( /* Get the transaction associated with the current thd, or create one if not yet created */ - parent_trx = check_trx_exists(ht, thd); + parent_trx = check_trx_exists(thd); /* In case MySQL calls this in the middle of a SELECT query, release possible adaptive hash latch to avoid deadlocks of threads */ @@ -5067,14 +5183,14 @@ ha_innobase::delete_table( trx = trx_allocate_for_mysql(); - trx->mysql_thd = current_thd; - trx->mysql_query_str = &((*current_thd).query); + trx->mysql_thd = thd; + trx->mysql_query_str = thd_query(thd); - if (thd->options & OPTION_NO_FOREIGN_KEY_CHECKS) { + if (thd_test_options(thd, OPTION_NO_FOREIGN_KEY_CHECKS)) { trx->check_foreigns = FALSE; } - if (thd->options & OPTION_RELAXED_UNIQUE_CHECKS) { + if (thd_test_options(thd, OPTION_RELAXED_UNIQUE_CHECKS)) { trx->check_unique_secondary = FALSE; } @@ -5090,7 +5206,8 @@ ha_innobase::delete_table( /* Drop the table in InnoDB */ error = row_drop_table_for_mysql(norm_name, trx, - thd->lex->sql_command == SQLCOM_DROP_DB); + thd_sql_command(thd) + == SQLCOM_DROP_DB); /* Flush the log to reduce probability that the .frm files and the InnoDB data dictionary get out-of-sync if the user runs @@ -5114,7 +5231,7 @@ ha_innobase::delete_table( /********************************************************************* Removes all tables in the named database inside InnoDB. 
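
Note how create() now brackets dict_table_autoinc_initialize() with dict_table_autoinc_lock()/_unlock(): every access to the per-table counter goes through its mutex. A sketch of the same discipline, with std::mutex standing in for the dictionary lock:

    #include <cstdio>
    #include <mutex>

    /* Stand-in for the AUTOINC-related part of dict_table_t. */
    struct table_t {
            std::mutex autoinc_mutex;
            unsigned long long autoinc = 0;
            bool autoinc_inited = false;
    };

    static void autoinc_initialize(table_t& t, unsigned long long value)
    {
            std::lock_guard<std::mutex> guard(t.autoinc_mutex);
            t.autoinc = value;
            t.autoinc_inited = true;
    }

    int main()
    {
            table_t t;
            autoinc_initialize(t, 42);  /* CREATE TABLE ... AUTO_INCREMENT=42 */
            printf("counter starts at %llu\n", t.autoinc);
            return 0;
    }
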
*/ - +static void innobase_drop_database( /*===================*/ @@ -5131,11 +5248,12 @@ innobase_drop_database( char* ptr; int error; char* namebuf; + THD* thd = current_thd; /* Get the transaction associated with the current thd, or create one if not yet created */ - parent_trx = check_trx_exists(hton, current_thd); + parent_trx = check_trx_exists(thd); /* In case MySQL calls this in the middle of a SELECT query, release possible adaptive hash latch to avoid deadlocks of threads */ @@ -5150,7 +5268,7 @@ innobase_drop_database( } ptr++; - namebuf = my_malloc((uint) len + 2, MYF(0)); + namebuf = (char*) my_malloc((uint) len + 2, MYF(0)); memcpy(namebuf, ptr, len); namebuf[len] = '/'; @@ -5159,10 +5277,10 @@ innobase_drop_database( innobase_casedn_str(namebuf); #endif trx = trx_allocate_for_mysql(); - trx->mysql_thd = current_thd; - trx->mysql_query_str = &((*current_thd).query); + trx->mysql_thd = thd; + trx->mysql_query_str = thd_query(thd); - if (current_thd->options & OPTION_NO_FOREIGN_KEY_CHECKS) { + if (thd_test_options(thd, OPTION_NO_FOREIGN_KEY_CHECKS)) { trx->check_foreigns = FALSE; } @@ -5208,13 +5326,14 @@ ha_innobase::rename_table( trx_t* trx; char norm_from[1000]; char norm_to[1000]; + THD* thd = ha_thd(); DBUG_ENTER("ha_innobase::rename_table"); /* Get the transaction associated with the current thd, or create one if not yet created */ - parent_trx = check_trx_exists(ht, current_thd); + parent_trx = check_trx_exists(thd); /* In case MySQL calls this in the middle of a SELECT query, release possible adaptive hash latch to avoid deadlocks of threads */ @@ -5228,10 +5347,10 @@ ha_innobase::rename_table( } trx = trx_allocate_for_mysql(); - trx->mysql_thd = current_thd; - trx->mysql_query_str = &((*current_thd).query); + trx->mysql_thd = thd; + trx->mysql_query_str = thd_query(thd); - if (current_thd->options & OPTION_NO_FOREIGN_KEY_CHECKS) { + if (thd_test_options(thd, OPTION_NO_FOREIGN_KEY_CHECKS)) { trx->check_foreigns = FALSE; } @@ -5281,10 +5400,9 @@ ha_innobase::records_in_range( key_range *max_key) /* in: range end key val, may also be 0 */ { - row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt; KEY* key; dict_index_t* index; - mysql_byte* key_val_buff2 = (mysql_byte*) my_malloc( + uchar* key_val_buff2 = (uchar*) my_malloc( table->s->reclength + table->s->max_key_length + 100, MYF(MY_FAE)); @@ -5300,8 +5418,7 @@ ha_innobase::records_in_range( DBUG_ENTER("records_in_range"); - ut_a(prebuilt->trx == - (trx_t*) current_thd->ha_data[ht->slot]); + ut_a(prebuilt->trx == thd_to_trx(ha_thd())); prebuilt->trx->op_info = (char*)"estimating records in index range"; @@ -5327,7 +5444,7 @@ ha_innobase::records_in_range( (ulint)upd_and_key_val_buff_len, index, (byte*) (min_key ? min_key->key : - (const mysql_byte*) 0), + (const uchar*) 0), (ulint) (min_key ? min_key->length : 0), prebuilt->trx); @@ -5335,7 +5452,7 @@ ha_innobase::records_in_range( range_end, (byte*) key_val_buff2, buff2_len, index, (byte*) (max_key ? max_key->key : - (const mysql_byte*) 0), + (const uchar*) 0), (ulint) (max_key ? 
max_key->length : 0), prebuilt->trx); @@ -5349,7 +5466,7 @@ ha_innobase::records_in_range( dtuple_free_for_mysql(heap1); dtuple_free_for_mysql(heap2); - my_free((gptr) key_val_buff2, MYF(0)); + my_free(key_val_buff2, MYF(0)); prebuilt->trx->op_info = (char*)""; @@ -5375,7 +5492,6 @@ ha_innobase::estimate_rows_upper_bound(void) /*======================================*/ /* out: upper bound of rows */ { - row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt; dict_index_t* index; ulonglong estimate; ulonglong local_data_file_length; @@ -5386,7 +5502,7 @@ ha_innobase::estimate_rows_upper_bound(void) external_lock(). To be safe, update the thd of the current table handle. */ - update_thd(current_thd); + update_thd(ha_thd()); prebuilt->trx->op_info = (char*) "calculating upper bound for table rows"; @@ -5424,8 +5540,6 @@ ha_innobase::scan_time() /*====================*/ /* out: estimated time measured in disk seeks */ { - row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt; - /* Since MySQL seems to favor table scans too much over index searches, we pretend that a sequential read takes the same time as a random disk read, that is, we do not divide the following @@ -5481,7 +5595,6 @@ ha_innobase::info( /*==============*/ uint flag) /* in: what information MySQL requests */ { - row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt; dict_table_t* ib_table; dict_index_t* index; ha_rows rec_per_key; @@ -5499,14 +5612,19 @@ ha_innobase::info( if (srv_force_recovery >= SRV_FORCE_NO_IBUF_MERGE) { - DBUG_RETURN(HA_ERR_CRASHED); + /* We return success (0) instead of HA_ERR_CRASHED, + because we want MySQL to process this query and not + stop, like it would do if it received the error code + HA_ERR_CRASHED. */ + + DBUG_RETURN(0); } /* We do not know if MySQL can call this function before calling external_lock(). To be safe, update the thd of the current table handle. 
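
The info() change above is behavioral: under innodb_force_recovery >= SRV_FORCE_NO_IBUF_MERGE the handler now reports success so the query proceeds without statistics, instead of failing with HA_ERR_CRASHED. A minimal model of the new early-out; the constant values are the conventional ones but should be treated as illustrative:

    #include <cstdio>

    static const int SRV_FORCE_NO_IBUF_MERGE = 4;  /* illustrative */
    static const int HA_ERR_CRASHED = 126;         /* illustrative */

    /* Returns what info() would: 0 keeps the query alive, where the
    old HA_ERR_CRASHED return would have aborted it. */
    static int info_early_out(int srv_force_recovery)
    {
            if (srv_force_recovery >= SRV_FORCE_NO_IBUF_MERGE) {
                    return 0;  /* new behavior: succeed, skip stats */
            }
            return -1;         /* sentinel: continue the normal path */
    }

    int main()
    {
            printf("forced recovery -> %d\n", info_early_out(4));
            printf("normal -> %d\n", info_early_out(0));
            (void) HA_ERR_CRASHED;
            return 0;
    }
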
*/ - update_thd(current_thd); + update_thd(ha_thd()); /* In case MySQL calls this in the middle of a SELECT query, release possible adaptive hash latch to avoid deadlocks of threads */ @@ -5518,15 +5636,16 @@ ha_innobase::info( ib_table = prebuilt->table; if (flag & HA_STATUS_TIME) { - /* In sql_show we call with this flag: update then statistics - so that they are up-to-date */ + if (srv_stats_on_metadata) { + /* In sql_show we call with this flag: update + then statistics so that they are up-to-date */ - prebuilt->trx->op_info = (char*)"updating table statistics"; + prebuilt->trx->op_info = "updating table statistics"; - dict_update_statistics(ib_table); + dict_update_statistics(ib_table); - prebuilt->trx->op_info = (char*) - "returning various info to MySQL"; + prebuilt->trx->op_info = "returning various info to MySQL"; + } my_snprintf(path, sizeof(path), "%s/%s%s", mysql_data_home, ib_table->name, reg_ext); @@ -5650,7 +5769,8 @@ ha_innobase::info( } if (flag & HA_STATUS_ERRKEY) { - ut_a(prebuilt->trx && prebuilt->trx->magic_n == TRX_MAGIC_N); + ut_a(prebuilt->trx); + ut_a(prebuilt->trx->magic_n == TRX_MAGIC_N); errkey = (unsigned int) row_get_mysql_key_number_for_index( (dict_index_t*) trx_get_error_info(prebuilt->trx)); @@ -5732,12 +5852,12 @@ ha_innobase::check( HA_CHECK_OPT* check_opt) /* in: check options, currently ignored */ { - row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt; ulint ret; - ut_a(prebuilt->trx && prebuilt->trx->magic_n == TRX_MAGIC_N); - ut_a(prebuilt->trx == - (trx_t*) current_thd->ha_data[ht->slot]); + DBUG_ASSERT(thd == ha_thd()); + ut_a(prebuilt->trx); + ut_a(prebuilt->trx->magic_n == TRX_MAGIC_N); + ut_a(prebuilt->trx == thd_to_trx(thd)); if (prebuilt->mysql_template == NULL) { /* Build the template; we will use a dummy template @@ -5767,9 +5887,8 @@ ha_innobase::update_table_comment( info on foreign keys */ const char* comment)/* in: table comment defined by user */ { - uint length = (uint) strlen(comment); - char* str; - row_prebuilt_t* prebuilt = (row_prebuilt_t*)innobase_prebuilt; + uint length = (uint) strlen(comment); + char* str; long flen; /* We do not know if MySQL can call this function before calling @@ -5780,7 +5899,7 @@ ha_innobase::update_table_comment( return((char*)comment); /* string too long */ } - update_thd(current_thd); + update_thd(ha_thd()); prebuilt->trx->op_info = (char*)"returning table comment"; @@ -5795,9 +5914,9 @@ ha_innobase::update_table_comment( mutex_enter_noninline(&srv_dict_tmpfile_mutex); rewind(srv_dict_tmpfile); - fprintf(srv_dict_tmpfile, "InnoDB free: %lu kB", - (ulong) fsp_get_available_space_in_free_extents( - prebuilt->table->space)); + fprintf(srv_dict_tmpfile, "InnoDB free: %llu kB", + fsp_get_available_space_in_free_extents( + prebuilt->table->space)); dict_print_info_on_foreign_keys(FALSE, srv_dict_tmpfile, prebuilt->trx, prebuilt->table); @@ -5811,7 +5930,7 @@ ha_innobase::update_table_comment( /* allocate buffer for the full string, and read the contents of the temporary file */ - str = my_malloc(length + flen + 3, MYF(0)); + str = (char*) my_malloc(length + flen + 3, MYF(0)); if (str) { char* pos = str + length; @@ -5842,7 +5961,6 @@ ha_innobase::get_foreign_key_create_info(void) can be inserted to the CREATE TABLE statement, MUST be freed with ::free_foreign_key_create_info */ { - row_prebuilt_t* prebuilt = (row_prebuilt_t*)innobase_prebuilt; char* str = 0; long flen; @@ -5852,7 +5970,7 @@ ha_innobase::get_foreign_key_create_info(void) external_lock(). 
To be safe, update the thd of the current table handle. */ - update_thd(current_thd); + update_thd(ha_thd()); prebuilt->trx->op_info = (char*)"getting info on foreign keys"; @@ -5880,7 +5998,7 @@ ha_innobase::get_foreign_key_create_info(void) /* allocate buffer for the string, and read the contents of the temporary file */ - str = my_malloc(flen + 1, MYF(0)); + str = (char*) my_malloc(flen + 1, MYF(0)); if (str) { rewind(srv_dict_tmpfile); @@ -5900,9 +6018,8 @@ ha_innobase::get_foreign_key_list(THD *thd, List<FOREIGN_KEY_INFO> *f_key_list) dict_foreign_t* foreign; DBUG_ENTER("get_foreign_key_list"); - row_prebuilt_t* prebuilt = (row_prebuilt_t*)innobase_prebuilt; ut_a(prebuilt != NULL); - update_thd(current_thd); + update_thd(ha_thd()); prebuilt->trx->op_info = (char*)"getting list of foreign keys"; trx_search_latch_release_if_reserved(prebuilt->trx); mutex_enter_noninline(&(dict_sys->mutex)); @@ -5913,8 +6030,8 @@ ha_innobase::get_foreign_key_list(THD *thd, List<FOREIGN_KEY_INFO> *f_key_list) FOREIGN_KEY_INFO f_key_info; LEX_STRING *name= 0; uint ulen; - char uname[NAME_LEN*3+1]; /* Unencoded name */ - char db_name[NAME_LEN*3+1]; + char uname[NAME_LEN+1]; /* Unencoded name */ + char db_name[NAME_LEN+1]; const char *tmp_buff; tmp_buff= foreign->id; @@ -5922,8 +6039,8 @@ ha_innobase::get_foreign_key_list(THD *thd, List<FOREIGN_KEY_INFO> *f_key_list) while (tmp_buff[i] != '/') i++; tmp_buff+= i + 1; - f_key_info.forein_id= make_lex_string(thd, 0, tmp_buff, - (uint) strlen(tmp_buff), 1); + f_key_info.forein_id = thd_make_lex_string(thd, 0, + tmp_buff, (uint) strlen(tmp_buff), 1); tmp_buff= foreign->referenced_table_name; /* Database name */ @@ -5935,22 +6052,23 @@ ha_innobase::get_foreign_key_list(THD *thd, List<FOREIGN_KEY_INFO> *f_key_list) } db_name[i]= 0; ulen= filename_to_tablename(db_name, uname, sizeof(uname)); - f_key_info.referenced_db= make_lex_string(thd, 0, uname, ulen, 1); + f_key_info.referenced_db = thd_make_lex_string(thd, 0, + uname, ulen, 1); /* Table name */ tmp_buff+= i + 1; ulen= filename_to_tablename(tmp_buff, uname, sizeof(uname)); - f_key_info.referenced_table= make_lex_string(thd, 0, uname, - ulen, 1); + f_key_info.referenced_table = thd_make_lex_string(thd, 0, + uname, ulen, 1); for (i= 0;;) { tmp_buff= foreign->foreign_col_names[i]; - name= make_lex_string(thd, name, tmp_buff, - (uint) strlen(tmp_buff), 1); + name = thd_make_lex_string(thd, name, + tmp_buff, (uint) strlen(tmp_buff), 1); f_key_info.foreign_fields.push_back(name); tmp_buff= foreign->referenced_col_names[i]; - name= make_lex_string(thd, name, tmp_buff, - (uint) strlen(tmp_buff), 1); + name = thd_make_lex_string(thd, name, + tmp_buff, (uint) strlen(tmp_buff), 1); f_key_info.referenced_fields.push_back(name); if (++i >= foreign->n_fields) break; @@ -5977,8 +6095,8 @@ ha_innobase::get_foreign_key_list(THD *thd, List<FOREIGN_KEY_INFO> *f_key_list) length=8; tmp_buff= "RESTRICT"; } - f_key_info.delete_method= make_lex_string(thd, f_key_info.delete_method, - tmp_buff, length, 1); + f_key_info.delete_method = thd_make_lex_string( + thd, f_key_info.delete_method, tmp_buff, length, 1); if (foreign->type & DICT_FOREIGN_ON_UPDATE_CASCADE) @@ -6001,14 +6119,19 @@ ha_innobase::get_foreign_key_list(THD *thd, List<FOREIGN_KEY_INFO> *f_key_list) length=8; tmp_buff= "RESTRICT"; } - f_key_info.update_method= make_lex_string(thd, f_key_info.update_method, - tmp_buff, length, 1); - - + f_key_info.update_method = thd_make_lex_string( + thd, f_key_info.update_method, tmp_buff, length, 1); + if (foreign->referenced_index && 
+ foreign->referenced_index->name) + { + f_key_info.referenced_key_name = thd_make_lex_string( + thd, f_key_info.referenced_key_name, + foreign->referenced_index->name, + strlen(foreign->referenced_index->name), 1); + } - FOREIGN_KEY_INFO *pf_key_info= ((FOREIGN_KEY_INFO *) - thd->memdup((gptr) &f_key_info, - sizeof(FOREIGN_KEY_INFO))); + FOREIGN_KEY_INFO *pf_key_info = (FOREIGN_KEY_INFO *) + thd_memdup(thd, &f_key_info, sizeof(FOREIGN_KEY_INFO)); f_key_list->push_back(pf_key_info); foreign = UT_LIST_GET_NEXT(foreign_list, foreign); } @@ -6027,13 +6150,11 @@ bool ha_innobase::can_switch_engines(void) /*=================================*/ { - row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt; bool can_switch; DBUG_ENTER("ha_innobase::can_switch_engines"); - ut_a(prebuilt->trx == - (trx_t*) current_thd->ha_data[ht->slot]); + ut_a(prebuilt->trx == thd_to_trx(ha_thd())); prebuilt->trx->op_info = "determining if there are foreign key constraints"; @@ -6059,8 +6180,6 @@ ha_innobase::referenced_by_foreign_key(void) /*========================================*/ /* out: > 0 if referenced by a FOREIGN KEY */ { - row_prebuilt_t* prebuilt = (row_prebuilt_t*)innobase_prebuilt; - if (dict_table_referenced_by_foreign_key(prebuilt->table)) { return(1); @@ -6093,8 +6212,6 @@ ha_innobase::extra( enum ha_extra_function operation) /* in: HA_EXTRA_FLUSH or some other flag */ { - row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt; - /* Warning: since it is not sure that MySQL calls external_lock before calling this function, the trx field in prebuilt can be obsolete! */ @@ -6106,8 +6223,7 @@ ha_innobase::extra( } break; case HA_EXTRA_RESET_STATE: - prebuilt->keep_other_fields_on_keyread = 0; - prebuilt->read_just_key = 0; + reset_template(prebuilt); break; case HA_EXTRA_NO_KEYREAD: prebuilt->read_just_key = 0; @@ -6118,6 +6234,26 @@ ha_innobase::extra( case HA_EXTRA_KEYREAD_PRESERVE_FIELDS: prebuilt->keep_other_fields_on_keyread = 1; break; + + /* IMPORTANT: prebuilt->trx can be obsolete in + this method, because it is not sure that MySQL + calls external_lock before this method with the + parameters below. We must not invoke update_thd() + either, because the calling threads may change. + CAREFUL HERE, OR MEMORY CORRUPTION MAY OCCUR! */ + case HA_EXTRA_IGNORE_DUP_KEY: + thd_to_trx(ha_thd())->duplicates |= TRX_DUP_IGNORE; + break; + case HA_EXTRA_WRITE_CAN_REPLACE: + thd_to_trx(ha_thd())->duplicates |= TRX_DUP_REPLACE; + break; + case HA_EXTRA_WRITE_CANNOT_REPLACE: + thd_to_trx(ha_thd())->duplicates &= ~TRX_DUP_REPLACE; + break; + case HA_EXTRA_NO_IGNORE_DUP_KEY: + thd_to_trx(ha_thd())->duplicates &= + ~(TRX_DUP_IGNORE | TRX_DUP_REPLACE); + break; default:/* Do nothing */ ; } @@ -6127,12 +6263,10 @@ ha_innobase::extra( int ha_innobase::reset() { - row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt; if (prebuilt->blob_heap) { row_mysql_prebuilt_free_blob_heap(prebuilt); } - prebuilt->keep_other_fields_on_keyread = 0; - prebuilt->read_just_key = 0; + reset_template(prebuilt); return 0; } @@ -6146,7 +6280,7 @@ on that table. MySQL-5.0 also calls this before each statement in an execution of a stored procedure. To make the execution more deterministic for binlogging, MySQL-5.0 locks all tables involved in a stored procedure with full explicit table -locks (thd->in_lock_tables is true in ::store_lock()) before executing the +locks (thd_in_lock_tables(thd) holds in store_lock()) before executing the procedure. 
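extra() and reset() above, and start_stmt()/external_lock() below, all funnel the old two-line state reset through reset_template(prebuilt). The helper itself is defined earlier in the file; reconstructed here from the removed assignments, so treat it as illustrative:

    static void
    reset_template(row_prebuilt_t* prebuilt)
    {
        /* Forget any index-only-read state left over from the previous
        statement; this is exactly what the removed pairs of lines did. */
        prebuilt->keep_other_fields_on_keyread = 0;
        prebuilt->read_just_key = 0;
    }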
*/ int @@ -6156,7 +6290,6 @@ ha_innobase::start_stmt( THD* thd, /* in: handle to the user thread */ thr_lock_type lock_type) { - row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt; trx_t* trx; update_thd(thd); @@ -6174,8 +6307,7 @@ ha_innobase::start_stmt( prebuilt->sql_stat_start = TRUE; prebuilt->hint_need_to_fetch_extra_cols = 0; - prebuilt->read_just_key = 0; - prebuilt->keep_other_fields_on_keyread = FALSE; + reset_template(prebuilt); if (!prebuilt->mysql_has_locked) { /* This handle is for a temporary table created inside @@ -6186,7 +6318,7 @@ ha_innobase::start_stmt( prebuilt->select_lock_type = LOCK_X; } else { if (trx->isolation_level != TRX_ISO_SERIALIZABLE - && thd->lex->sql_command == SQLCOM_SELECT + && thd_sql_command(thd) == SQLCOM_SELECT && lock_type == TL_READ) { /* For other than temporary tables, we obtain @@ -6255,7 +6387,6 @@ ha_innobase::external_lock( THD* thd, /* in: handle to the user thread */ int lock_type) /* in: lock type */ { - row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt; trx_t* trx; DBUG_ENTER("ha_innobase::external_lock"); @@ -6263,13 +6394,35 @@ ha_innobase::external_lock( update_thd(thd); + /* Statement based binlogging does not work in isolation level + READ UNCOMMITTED and READ COMMITTED since the necessary + locks cannot be taken. In this case, we print an + informative error message and return with an error. */ + if (lock_type == F_WRLCK) + { + ulong const binlog_format= thd_binlog_format(thd); + ulong const tx_isolation = thd_tx_isolation(current_thd); + if (tx_isolation <= ISO_READ_COMMITTED && + binlog_format == BINLOG_FORMAT_STMT) + { + char buf[256]; + my_snprintf(buf, sizeof(buf), + "Transaction level '%s' in" + " InnoDB is not safe for binlog mode '%s'", + tx_isolation_names[tx_isolation], + binlog_format_names[binlog_format]); + my_error(ER_BINLOG_LOGGING_IMPOSSIBLE, MYF(0), buf); + DBUG_RETURN(HA_ERR_LOGGING_IMPOSSIBLE); + } + } + + trx = prebuilt->trx; prebuilt->sql_stat_start = TRUE; prebuilt->hint_need_to_fetch_extra_cols = 0; - prebuilt->read_just_key = 0; - prebuilt->keep_other_fields_on_keyread = FALSE; + reset_template(prebuilt); if (lock_type == F_WRLCK) { @@ -6299,8 +6452,8 @@ ha_innobase::external_lock( if (trx->isolation_level == TRX_ISO_SERIALIZABLE && prebuilt->select_lock_type == LOCK_NONE - && (thd->options - & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))) { + && thd_test_options(thd, + OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) { /* To get serializable execution, we let InnoDB conceptually add 'LOCK IN SHARE MODE' to all SELECTs @@ -6321,23 +6474,23 @@ ha_innobase::external_lock( VERY easily deadlocks. We do not set InnoDB table locks if user has not explicitly - requested a table lock. Note that thd->in_lock_tables - can be TRUE on some cases e.g. at the start of a stored + requested a table lock. Note that thd_in_lock_tables(thd) + can hold in some cases, e.g., at the start of a stored procedure call (SQLCOM_CALL). 
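The new guard at the top of external_lock() refuses a write lock when statement-format binlogging meets an isolation level too weak to take the locks replication would need. Restated as a standalone predicate for clarity (a hypothetical helper; no function of this name exists in the patch):

    static bool
    innodb_stmt_binlog_unsafe(THD* thd)
    {
        /* READ UNCOMMITTED and READ COMMITTED cannot take next-key locks,
        so statement-based replay on a slave could see different rows. */
        return(thd_tx_isolation(thd) <= ISO_READ_COMMITTED
               && thd_binlog_format(thd) == BINLOG_FORMAT_STMT);
    }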
*/ if (prebuilt->select_lock_type != LOCK_NONE) { - if (thd->in_lock_tables && - thd->lex->sql_command == SQLCOM_LOCK_TABLES && - thd->variables.innodb_table_locks && - (thd->options & OPTION_NOT_AUTOCOMMIT)) { + if (thd_sql_command(thd) == SQLCOM_LOCK_TABLES + && THDVAR(thd, table_locks) + && thd_test_options(thd, OPTION_NOT_AUTOCOMMIT) + && thd_in_lock_tables(thd)) { ulint error = row_lock_table_for_mysql( prebuilt, NULL, 0); if (error != DB_SUCCESS) { error = convert_error_code_to_mysql( - (int) error, user_thd); + (int) error, thd); DBUG_RETURN((int) error); } } @@ -6353,6 +6506,12 @@ ha_innobase::external_lock( trx->n_mysql_tables_in_use--; prebuilt->mysql_has_locked = FALSE; + /* Release a possible FIFO ticket and search latch. Since we + may reserve the kernel mutex, we have to release the search + system latch first to obey the latching order. */ + + innobase_release_stat_resources(trx); + /* If the MySQL lock count drops to zero we know that the current SQL statement has ended */ @@ -6361,13 +6520,7 @@ ha_innobase::external_lock( trx->mysql_n_tables_locked = 0; prebuilt->used_in_HANDLER = FALSE; - /* Release a possible FIFO ticket and search latch. Since we - may reserve the kernel mutex, we have to release the search - system latch first to obey the latching order. */ - - innobase_release_stat_resources(trx); - - if (!(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))) { + if (!thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) { if (trx->active_trans != 0) { innobase_commit(ht, thd, TRUE); } @@ -6397,7 +6550,6 @@ ha_innobase::transactional_table_lock( THD* thd, /* in: handle to the user thread */ int lock_type) /* in: lock type */ { - row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt; trx_t* trx; DBUG_ENTER("ha_innobase::transactional_table_lock"); @@ -6409,7 +6561,7 @@ ha_innobase::transactional_table_lock( update_thd(thd); - if (prebuilt->table->ibd_file_missing && !current_thd->tablespace_op) { + if (prebuilt->table->ibd_file_missing && !thd_tablespace_op(thd)) { ut_print_timestamp(stderr); fprintf(stderr, " InnoDB error:\n" "MySQL is trying to use a table handle but the .ibd file for\n" @@ -6427,8 +6579,7 @@ ha_innobase::transactional_table_lock( prebuilt->sql_stat_start = TRUE; prebuilt->hint_need_to_fetch_extra_cols = 0; - prebuilt->read_just_key = 0; - prebuilt->keep_other_fields_on_keyread = FALSE; + reset_template(prebuilt); if (lock_type == F_WRLCK) { prebuilt->select_lock_type = LOCK_X; @@ -6454,17 +6605,17 @@ ha_innobase::transactional_table_lock( trx->active_trans = 1; } - if (thd->in_lock_tables && thd->variables.innodb_table_locks) { + if (THDVAR(thd, table_locks) && thd_in_lock_tables(thd)) { ulint error = DB_SUCCESS; error = row_lock_table_for_mysql(prebuilt, NULL, 0); if (error != DB_SUCCESS) { - error = convert_error_code_to_mysql((int) error, user_thd); + error = convert_error_code_to_mysql((int) error, thd); DBUG_RETURN((int) error); } - if (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) { + if (thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) { /* Store the current undo_no of the transaction so that we know where to roll back if we have @@ -6479,7 +6630,7 @@ ha_innobase::transactional_table_lock( /**************************************************************************** Here we export InnoDB status variables to MySQL. 
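external_lock() and transactional_table_lock() above read the session setting through THDVAR(thd, table_locks) instead of thd->variables.innodb_table_locks. The matching declaration sits with the other plugin variables and is not visible in these hunks; assuming the 5.1 plugin API, it looks roughly like:

    /* Per-session boolean; THDVAR(thd, table_locks) reads it. */
    static MYSQL_THDVAR_BOOL(table_locks, PLUGIN_VAR_OPCMDARG,
        "Enable InnoDB locking in LOCK TABLES",
        NULL,   /* check function */
        NULL,   /* update function */
        TRUE);  /* default value */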
*/ - +static int innodb_export_status() /*==================*/ @@ -6494,7 +6645,7 @@ innodb_export_status() /**************************************************************************** Implements the SHOW INNODB STATUS command. Sends the output of the InnoDB Monitor to the client. */ - +static bool innodb_show_status( /*===============*/ @@ -6510,11 +6661,7 @@ innodb_show_status( DBUG_ENTER("innodb_show_status"); - if (have_innodb != SHOW_OPTION_YES) { - DBUG_RETURN(FALSE); - } - - trx = check_trx_exists(hton, thd); + trx = check_trx_exists(thd); innobase_release_stat_resources(trx); @@ -6544,7 +6691,7 @@ innodb_show_status( /* allocate buffer for the string, and read the contents of the temporary file */ - if (!(str = my_malloc(usable_len + 1, MYF(0)))) { + if (!(str = (char*) my_malloc(usable_len + 1, MYF(0)))) { mutex_exit_noninline(&srv_monitor_file_mutex); DBUG_RETURN(TRUE); } @@ -6585,7 +6732,7 @@ innodb_show_status( /**************************************************************************** Implements the SHOW MUTEX STATUS command. . */ - +static bool innodb_mutex_show_status( /*=====================*/ @@ -6684,6 +6831,7 @@ innodb_mutex_show_status( DBUG_RETURN(FALSE); } +static bool innobase_show_status(handlerton *hton, THD* thd, stat_print_fn* stat_print, enum ha_stat_type stat_type) @@ -6704,12 +6852,12 @@ bool innobase_show_status(handlerton *hton, THD* thd, locking. ****************************************************************************/ -static mysql_byte* innobase_get_key(INNOBASE_SHARE* share, uint* length, +static uchar* innobase_get_key(INNOBASE_SHARE* share, size_t *length, my_bool not_used __attribute__((unused))) { *length=share->table_name_length; - return (mysql_byte*) share->table_name; + return (uchar*) share->table_name; } static INNOBASE_SHARE* get_share(const char* table_name) @@ -6719,7 +6867,7 @@ static INNOBASE_SHARE* get_share(const char* table_name) uint length=(uint) strlen(table_name); if (!(share=(INNOBASE_SHARE*) hash_search(&innobase_open_tables, - (mysql_byte*) table_name, + (uchar*) table_name, length))) { share = (INNOBASE_SHARE *) my_malloc(sizeof(*share)+length+1, @@ -6730,9 +6878,9 @@ static INNOBASE_SHARE* get_share(const char* table_name) strmov(share->table_name,table_name); if (my_hash_insert(&innobase_open_tables, - (mysql_byte*) share)) { + (uchar*) share)) { pthread_mutex_unlock(&innobase_share_mutex); - my_free((gptr) share,0); + my_free(share,0); return 0; } @@ -6752,10 +6900,10 @@ static void free_share(INNOBASE_SHARE* share) pthread_mutex_lock(&innobase_share_mutex); if (!--share->use_count) { - hash_delete(&innobase_open_tables, (mysql_byte*) share); + hash_delete(&innobase_open_tables, (uchar*) share); thr_lock_delete(&share->lock); pthread_mutex_destroy(&share->mutex); - my_free((gptr) share, MYF(0)); + my_free(share, MYF(0)); } pthread_mutex_unlock(&innobase_share_mutex); @@ -6785,14 +6933,13 @@ ha_innobase::store_lock( 'lock'; this may also be TL_IGNORE */ { - row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt; trx_t* trx; /* Note that trx in this function is NOT necessarily prebuilt->trx because we call update_thd() later, in ::external_lock()! Failure to understand this caused a serious memory corruption bug in 5.1.11. */ - trx = check_trx_exists(ht, thd); + trx = check_trx_exists(thd); /* NOTE: MySQL can call this function with lock 'type' TL_IGNORE! Be careful to ignore TL_IGNORE if we are going to do something with @@ -6802,24 +6949,36 @@ ha_innobase::store_lock( of the transaction. 
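innobase_get_key() above is the key-extraction callback for the innobase_open_tables hash used by get_share()/free_share(). The registration happens at engine start-up, outside these hunks; the wiring is assumed to look like:

    /* Each INNOBASE_SHARE embeds its table name; the callback lets the
    hash locate that name when probing with (uchar*) table_name. */
    hash_init(&innobase_open_tables, system_charset_info, 32, 0, 0,
              (hash_get_key) innobase_get_key, 0, 0);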
*/ if (lock_type != TL_IGNORE - && trx->n_mysql_tables_in_use == 0) { + && trx->n_mysql_tables_in_use == 0) { trx->isolation_level = innobase_map_isolation_level( - (enum_tx_isolation) - thd->variables.tx_isolation); + (enum_tx_isolation) thd_tx_isolation(thd)); + + if (trx->isolation_level <= TRX_ISO_READ_COMMITTED + && trx->global_read_view) { + + /* At low transaction isolation levels we let + each consistent read set its own snapshot */ + + read_view_close_for_mysql(trx); + } } - if (thd->lex->sql_command == SQLCOM_DROP_TABLE) { + DBUG_ASSERT(thd == current_thd); + const bool in_lock_tables = thd_in_lock_tables(thd); + const uint sql_command = thd_sql_command(thd); + + if (sql_command == SQLCOM_DROP_TABLE) { /* MySQL calls this function in DROP TABLE though this table handle may belong to another thd that is running a query. Let us in that case skip any changes to the prebuilt struct. */ - } else if ((lock_type == TL_READ && thd->in_lock_tables) || - (lock_type == TL_READ_HIGH_PRIORITY && thd->in_lock_tables) || - lock_type == TL_READ_WITH_SHARED_LOCKS || - lock_type == TL_READ_NO_INSERT || - (thd->lex->sql_command != SQLCOM_SELECT - && lock_type != TL_IGNORE)) { + } else if ((lock_type == TL_READ && in_lock_tables) + || (lock_type == TL_READ_HIGH_PRIORITY && in_lock_tables) + || lock_type == TL_READ_WITH_SHARED_LOCKS + || lock_type == TL_READ_NO_INSERT + || (lock_type != TL_IGNORE + && sql_command != SQLCOM_SELECT)) { /* The OR cases above are in this order: 1) MySQL is doing LOCK TABLES ... READ LOCAL, or we @@ -6844,12 +7003,12 @@ ha_innobase::store_lock( isolation_level = trx->isolation_level; if ((srv_locks_unsafe_for_binlog - || isolation_level == TRX_ISO_READ_COMMITTED) - && isolation_level != TRX_ISO_SERIALIZABLE - && (lock_type == TL_READ || lock_type == TL_READ_NO_INSERT) - && (thd->lex->sql_command == SQLCOM_INSERT_SELECT - || thd->lex->sql_command == SQLCOM_UPDATE - || thd->lex->sql_command == SQLCOM_CREATE_TABLE)) { + || isolation_level == TRX_ISO_READ_COMMITTED) + && isolation_level != TRX_ISO_SERIALIZABLE + && (lock_type == TL_READ || lock_type == TL_READ_NO_INSERT) + && (sql_command == SQLCOM_INSERT_SELECT + || sql_command == SQLCOM_UPDATE + || sql_command == SQLCOM_CREATE_TABLE)) { /* If we either have innobase_locks_unsafe_for_binlog option set or this session is using READ COMMITTED @@ -6862,7 +7021,7 @@ ha_innobase::store_lock( prebuilt->select_lock_type = LOCK_NONE; prebuilt->stored_select_lock_type = LOCK_NONE; - } else if (thd->lex->sql_command == SQLCOM_CHECKSUM) { + } else if (sql_command == SQLCOM_CHECKSUM) { /* Use consistent read for checksum table */ prebuilt->select_lock_type = LOCK_NONE; @@ -6886,13 +7045,13 @@ ha_innobase::store_lock( /* Starting from 5.0.7, we weaken also the table locks set at the start of a MySQL stored procedure call, just like we weaken the locks set at the start of an SQL statement. - MySQL does set thd->in_lock_tables TRUE there, but in reality + MySQL does set in_lock_tables TRUE there, but in reality we do not need table locks to make the execution of a single transaction stored procedure call deterministic (if it does not use a consistent read). */ if (lock_type == TL_READ - && thd->lex->sql_command == SQLCOM_LOCK_TABLES) { + && sql_command == SQLCOM_LOCK_TABLES) { /* We come here if MySQL is processing LOCK TABLES ... READ LOCAL. 
MyISAM under that table lock type reads the table as it was at the time the lock was @@ -6914,28 +7073,17 @@ ha_innobase::store_lock( We especially allow multiple writers if MySQL is at the start of a stored procedure call (SQLCOM_CALL) or a - stored function call (MySQL does have thd->in_lock_tables + stored function call (MySQL does have in_lock_tables TRUE there). */ if ((lock_type >= TL_WRITE_CONCURRENT_INSERT - && lock_type <= TL_WRITE) - && !(thd->in_lock_tables - && thd->lex->sql_command == SQLCOM_LOCK_TABLES) - && !thd->tablespace_op - && thd->lex->sql_command != SQLCOM_TRUNCATE - && thd->lex->sql_command != SQLCOM_OPTIMIZE - -#ifdef __WIN__ - /* For alter table on win32 for succesful operation - completion it is used TL_WRITE(=10) lock instead of - TL_WRITE_ALLOW_READ(=6), however here in innodb handler - TL_WRITE is lifted to TL_WRITE_ALLOW_WRITE, which causes - race condition when several clients do alter table - simultaneously (bug #17264). This fix avoids the problem. */ - && thd->lex->sql_command != SQLCOM_ALTER_TABLE -#endif - - && thd->lex->sql_command != SQLCOM_CREATE_TABLE) { + && lock_type <= TL_WRITE) + && !(in_lock_tables + && sql_command == SQLCOM_LOCK_TABLES) + && !thd_tablespace_op(thd) + && sql_command != SQLCOM_TRUNCATE + && sql_command != SQLCOM_OPTIMIZE + && sql_command != SQLCOM_CREATE_TABLE) { lock_type = TL_WRITE_ALLOW_WRITE; } @@ -6948,10 +7096,10 @@ ha_innobase::store_lock( We especially allow concurrent inserts if MySQL is at the start of a stored procedure call (SQLCOM_CALL) - (MySQL does have thd->in_lock_tables TRUE there). */ + (MySQL does have thd_in_lock_tables() TRUE there). */ if (lock_type == TL_READ_NO_INSERT - && thd->lex->sql_command != SQLCOM_LOCK_TABLES) { + && sql_command != SQLCOM_LOCK_TABLES) { lock_type = TL_READ; } @@ -6973,21 +7121,25 @@ the value of the auto-inc counter. */ int ha_innobase::innobase_read_and_init_auto_inc( /*=========================================*/ - /* out: 0 or error code: deadlock or lock wait - timeout */ - longlong* ret) /* out: auto-inc value */ + /* out: 0 or error code: + deadlock or lock wait timeout */ + longlong* value) /* out: the autoinc value */ { - row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt; longlong auto_inc; - ulint old_select_lock_type; + ibool stmt_start; + int mysql_error = 0; + dict_table_t* innodb_table = prebuilt->table; ibool trx_was_not_started = FALSE; - int error; ut_a(prebuilt); ut_a(prebuilt->table); + /* Remember if we are in the beginning of an SQL statement. + This function must not change that flag. */ + stmt_start = prebuilt->sql_stat_start; + /* Prepare prebuilt->trx in the table handle */ - update_thd(current_thd); + update_thd(ha_thd()); if (prebuilt->trx->conc_state == TRX_NOT_STARTED) { trx_was_not_started = TRUE; @@ -6998,114 +7150,115 @@ ha_innobase::innobase_read_and_init_auto_inc( trx_search_latch_release_if_reserved(prebuilt->trx); - auto_inc = dict_table_autoinc_read(prebuilt->table); - - if (auto_inc != 0) { - /* Already initialized */ - *ret = auto_inc; + dict_table_autoinc_lock(prebuilt->table); - error = 0; + auto_inc = dict_table_autoinc_read(prebuilt->table); - goto func_exit_early; + /* Was the AUTOINC counter reset during normal processing, if + so then we simply start count from 1. 
No need to go to the index.*/ + if (auto_inc == 0 && innodb_table->autoinc_inited) { + ++auto_inc; + dict_table_autoinc_initialize(innodb_table, auto_inc); } - error = row_lock_table_autoinc_for_mysql(prebuilt); + if (auto_inc == 0) { + dict_index_t* index; + ulint error; + const char* autoinc_col_name; - if (error != DB_SUCCESS) { - error = convert_error_code_to_mysql(error, user_thd); + ut_a(!innodb_table->autoinc_inited); - goto func_exit_early; - } + index = innobase_get_index(table->s->next_number_index); - /* Check again if someone has initialized the counter meanwhile */ - auto_inc = dict_table_autoinc_read(prebuilt->table); + autoinc_col_name = table->found_next_number_field->field_name; - if (auto_inc != 0) { - *ret = auto_inc; + error = row_search_max_autoinc( + index, autoinc_col_name, &auto_inc); - error = 0; + if (error == DB_SUCCESS) { + ++auto_inc; + dict_table_autoinc_initialize(innodb_table, auto_inc); + } else { + fprintf(stderr, " InnoDB error: Couldn't read the " + "max AUTOINC value from index (%s).\n", + index->name); - goto func_exit_early; + mysql_error = 1; + } } - (void) extra(HA_EXTRA_KEYREAD); - index_init(table->s->next_number_index, 1); + *value = auto_inc; - /* Starting from 5.0.9, we use a consistent read to read the auto-inc - column maximum value. This eliminates the spurious deadlocks caused - by the row X-lock that we previously used. Note the following flaw - in our algorithm: if some other user meanwhile UPDATEs the auto-inc - column, our consistent read will not return the largest value. We - accept this flaw, since the deadlocks were a bigger trouble. */ + dict_table_autoinc_unlock(prebuilt->table); - /* Fetch all the columns in the key */ + /* Since MySQL does not seem to call autocommit after SHOW TABLE + STATUS (even if we would register the trx here), we commit our + transaction here if it was started here. This is to eliminate a + dangling transaction. If the user had AUTOCOMMIT=0, then SHOW + TABLE STATUS does leave a dangling transaction if the user does not + himself call COMMIT. */ - prebuilt->hint_need_to_fetch_extra_cols = ROW_RETRIEVE_ALL_COLS; + if (trx_was_not_started) { - old_select_lock_type = prebuilt->select_lock_type; - prebuilt->select_lock_type = LOCK_NONE; + innobase_commit_low(prebuilt->trx); + } - /* Eliminate an InnoDB error print that happens when we try to SELECT - from a table when no table has been locked in ::external_lock(). */ - prebuilt->trx->n_mysql_tables_in_use++; + prebuilt->sql_stat_start = stmt_start; - error = index_last(table->record[1]); + return(mysql_error); +} - prebuilt->trx->n_mysql_tables_in_use--; - prebuilt->select_lock_type = old_select_lock_type; +/******************************************************************************* +Read the next autoinc value, initialize the table if it's not initialized. +On return if there is no error then the tables AUTOINC lock is locked.*/ - if (error) { - if (error == HA_ERR_END_OF_FILE) { - /* The table was empty, initialize to 1 */ - auto_inc = 1; +ulong +ha_innobase::innobase_get_auto_increment( + ulonglong* value) /* out: autoinc value */ +{ + ulong error; - error = 0; - } else { - /* This should not happen in a consistent read */ - sql_print_error("Consistent read of auto-inc column " - "returned %lu", (ulong) error); - auto_inc = -1; + do { + error = innobase_autoinc_lock(); - goto func_exit; - } - } else { - /* Initialize to max(col) + 1; we use - 'found_next_number_field' below because MySQL in SHOW TABLE - STATUS does not seem to set 'next_number_field'. 
The comment - in table.h says that 'next_number_field' is set when it is - 'active'. - Since 5.1 MySQL enforces that we announce fields which we will - read; as we only do a val_*() call, dbug_tmp_use_all_columns() - with read_set is sufficient. */ + if (error == DB_SUCCESS) { + ib_longlong autoinc; - my_bitmap_map *old_map; - old_map= dbug_tmp_use_all_columns(table, table->read_set); - auto_inc = (longlong) table->found_next_number_field-> - val_int_offset(table->s->rec_buff_length) + 1; - dbug_tmp_restore_column_map(table->read_set, old_map); - } + /* Determine the first value of the interval */ + autoinc = dict_table_autoinc_read(prebuilt->table); - dict_table_autoinc_initialize(prebuilt->table, auto_inc); + /* We need to initialize the AUTO-INC value, for + that we release all locks.*/ + if (autoinc <= 0) { + trx_t* trx; -func_exit: - (void) extra(HA_EXTRA_NO_KEYREAD); + trx = prebuilt->trx; + dict_table_autoinc_unlock(prebuilt->table); - index_end(); + /* If we had reserved the AUTO-INC + lock in this SQL statement we release + it before retrying.*/ + row_unlock_table_autoinc_for_mysql(trx); - *ret = auto_inc; + /* Just to make sure */ + ut_a(!trx->auto_inc_lock); -func_exit_early: - /* Since MySQL does not seem to call autocommit after SHOW TABLE - STATUS (even if we would register the trx here), we commit our - transaction here if it was started here. This is to eliminate a - dangling transaction. If the user had AUTOCOMMIT=0, then SHOW - TABLE STATUS does leave a dangling transaction if the user does not - himself call COMMIT. */ + int mysql_error; - if (trx_was_not_started) { + mysql_error = innobase_read_and_init_auto_inc( + &autoinc); - innobase_commit_low(prebuilt->trx); - } + if (!mysql_error) { + /* Should have read the proper value */ + ut_a(autoinc > 0); + } else { + error = DB_ERROR; + } + } else { + *value = (ulonglong) autoinc; + } + } + } while (*value == 0 && error == DB_SUCCESS); return(error); } @@ -7118,37 +7271,91 @@ auto-inc counter in *first_value, and ULONGLONG_MAX in *nb_reserved_values (as we have a table-level lock). offset, increment, nb_desired_values are ignored. *first_value is set to -1 if error (deadlock or lock wait timeout) */ -void ha_innobase::get_auto_increment( +void +ha_innobase::get_auto_increment( /*=================================*/ - ulonglong offset, /* in */ - ulonglong increment, /* in */ - ulonglong nb_desired_values, /* in */ - ulonglong *first_value, /* out */ - ulonglong *nb_reserved_values) /* out */ + ulonglong offset, /* in: */ + ulonglong increment, /* in: table autoinc increment */ + ulonglong nb_desired_values, /* in: number of values reqd */ + ulonglong *first_value, /* out: the autoinc value */ + ulonglong *nb_reserved_values) /* out: count of reserved values */ { - longlong nr; - int error; + trx_t* trx; + ulint error; + ulonglong autoinc = 0; /* Prepare prebuilt->trx in the table handle */ - update_thd(current_thd); + update_thd(ha_thd()); - error = innobase_read_and_init_auto_inc(&nr); + error = innobase_get_auto_increment(&autoinc); - if (error) { - /* This should never happen in the current (5.0.6) code, since - we call this function only after the counter has been - initialized. */ + if (error != DB_SUCCESS) { + /* This should never happen in the code > ver 5.0.6, + since we call this function only after the counter + has been initialized. 
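The do/while loop in innobase_get_auto_increment() above retries once an uninitialized counter has been seeded. A worked trace, with invented values, for a table whose AUTO_INCREMENT column already holds 40, 41 and 42:

    /* pass 1: innobase_autoinc_lock()       -> DB_SUCCESS
               dict_table_autoinc_read()     -> 0 (never initialized)
               locks released, then innobase_read_and_init_auto_inc()
               runs row_search_max_autoinc() -> 42, counter seeded to 43
       pass 2: dict_table_autoinc_read()     -> 43, *value = 43
       the loop exits with *value != 0 and, per the function comment,
       the table's AUTOINC lock still held. */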
*/ ut_print_timestamp(stderr); - sql_print_error("Error %lu in ::get_auto_increment()", - (ulong) error); - *first_value= (~(ulonglong) 0); + sql_print_error("Error %lu in ::get_auto_increment()", error); + + *first_value = (~(ulonglong) 0); return; } - *first_value= (ulonglong) nr; - /* table-level autoinc lock reserves up to +inf */ - *nb_reserved_values= ULONGLONG_MAX; + /* This is a hack, since nb_desired_values seems to be accurate only + for the first call to get_auto_increment() for multi-row INSERT and + meaningless for other statements e.g, LOAD etc. Subsequent calls to + this method for the same statement results in different values which + don't make sense. Therefore we store the value the first time we are + called and count down from that as rows are written (see write_row()). + */ + + trx = prebuilt->trx; + + /* Called for the first time ? */ + if (trx->n_autoinc_rows == 0) { + + trx->n_autoinc_rows = (ulint) nb_desired_values; + + /* It's possible for nb_desired_values to be 0: + e.g., INSERT INTO T1(C) SELECT C FROM T2; */ + if (nb_desired_values == 0) { + + trx->n_autoinc_rows = 1; + } + + set_if_bigger(*first_value, autoinc); + /* Not in the middle of a mult-row INSERT. */ + } else if (prebuilt->last_value == 0) { + set_if_bigger(*first_value, autoinc); + } + + *nb_reserved_values = trx->n_autoinc_rows; + + /* With old style AUTOINC locking we only update the table's + AUTOINC counter after attempting to insert the row. */ + if (innobase_autoinc_lock_mode != AUTOINC_OLD_STYLE_LOCKING) { + + /* Compute the last value in the interval */ + prebuilt->last_value = *first_value + + (*nb_reserved_values * increment); + + ut_a(prebuilt->last_value >= *first_value); + + /* Update the table autoinc variable */ + dict_table_autoinc_update( + prebuilt->table, prebuilt->last_value); + } else { + /* This will force write_row() into attempting an update + of the table's AUTOINC counter. 
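A quick arithmetic check of the interval reservation above, with invented numbers:

    /* *first_value = 101, *nb_reserved_values = 5, increment = 2:
       prebuilt->last_value = 101 + 5 * 2 = 111
       so this statement may hand out 101, 103, 105, 107 and 109, and the
       table counter is pushed to 111 for the next caller. Under
       AUTOINC_OLD_STYLE_LOCKING, last_value stays 0 instead, which forces
       write_row() to update the counter row by row. */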
*/ + prebuilt->last_value = 0; + } + + /* The increment to be used to increase the AUTOINC value, we use + this in write_row() and update_row() to increase the autoinc counter + for columns that are filled by the user.*/ + prebuilt->table->autoinc_increment = increment; + + dict_table_autoinc_unlock(prebuilt->table); } /* See comment in handler.h */ @@ -7157,10 +7364,9 @@ ha_innobase::reset_auto_increment(ulonglong value) { DBUG_ENTER("ha_innobase::reset_auto_increment"); - row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt; - int error; + int error; - update_thd(current_thd); + update_thd(ha_thd()); error = row_lock_table_autoinc_for_mysql(prebuilt); @@ -7170,7 +7376,7 @@ ha_innobase::reset_auto_increment(ulonglong value) DBUG_RETURN(error); } - dict_table_autoinc_initialize(prebuilt->table, value); + innobase_reset_autoinc(value); DBUG_RETURN(0); } @@ -7179,7 +7385,7 @@ ha_innobase::reset_auto_increment(ulonglong value) bool ha_innobase::get_error_message(int error, String *buf) { - trx_t* trx = check_trx_exists(ht, current_thd); + trx_t* trx = check_trx_exists(ha_thd()); buf->copy(trx->detailed_error, strlen(trx->detailed_error), system_charset_info); @@ -7197,12 +7403,11 @@ ha_innobase::cmp_ref( /*=================*/ /* out: < 0 if ref1 < ref2, 0 if equal, else > 0 */ - const mysql_byte* ref1, /* in: an (internal) primary key value in the + const uchar* ref1, /* in: an (internal) primary key value in the MySQL key value format */ - const mysql_byte* ref2) /* in: an (internal) primary key value in the + const uchar* ref2) /* in: an (internal) primary key value in the MySQL key value format */ { - row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt; enum_field_types mysql_type; Field* field; KEY_PART_INFO* key_part; @@ -7242,9 +7447,8 @@ ha_innobase::cmp_ref( ref1 += 2; ref2 += 2; - result = ((Field_blob*)field)->cmp( - (const char*)ref1, len1, - (const char*)ref2, len2); + result = ((Field_blob*)field)->cmp( ref1, len1, + ref2, len2); } else { result = field->key_cmp(ref1, ref2); } @@ -7261,6 +7465,33 @@ ha_innobase::cmp_ref( return(0); } +/*********************************************************************** +Ask InnoDB if a query to a table can be cached. */ + +my_bool +ha_innobase::register_query_cache_table( +/*====================================*/ + /* out: TRUE if query caching + of the table is permitted */ + THD* thd, /* in: user thread handle */ + char* table_key, /* in: concatenation of database name, + the null character '\0', + and the table name */ + uint key_length, /* in: length of the full name, i.e. + len(dbname) + len(tablename) + 1 */ + qc_engine_callback* + call_back, /* out: pointer to function for + checking if query caching + is permitted */ + ulonglong *engine_data) /* in/out: data to call_back */ +{ + *call_back = innobase_query_caching_of_table_permitted; + *engine_data = 0; + return(innobase_query_caching_of_table_permitted(thd, table_key, + key_length, + engine_data)); +} + char* ha_innobase::get_mysql_bin_log_name() { @@ -7276,7 +7507,6 @@ ha_innobase::get_mysql_bin_log_pos() return(trx_sys_mysql_bin_log_pos); } -extern "C" { /********************************************************************** This function is used to find the storage length in bytes of the first n characters for prefix indexes using a multibyte character set. The function @@ -7285,7 +7515,7 @@ index field in bytes. NOTE: the prototype of this function is copied to data0type.c! If you change this function, you MUST change also data0type.c! 
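register_query_cache_table() above installs innobase_query_caching_of_table_permitted as the revalidation callback and runs it once up front. The consuming side lives in the server's query cache, not in this diff; an assumed sketch of the call pattern:

    qc_engine_callback callback;
    ulonglong engine_data = 0;

    /* Illustrative only: cache the result only if the engine agrees now,
    and keep "callback" to re-check permission on later cache hits. */
    if (file->register_query_cache_table(thd, full_name, full_name_len,
                                         &callback, &engine_data)) {
        /* store the result together with callback and engine_data */
    }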
*/ - +extern "C" ulint innobase_get_at_most_n_mbchars( /*===========================*/ @@ -7350,50 +7580,10 @@ innobase_get_at_most_n_mbchars( return(char_length); } -} - -/********************************************************************** -This function returns true if - -1) SQL-query in the current thread -is either REPLACE or LOAD DATA INFILE REPLACE. - -2) SQL-query in the current thread -is INSERT ON DUPLICATE KEY UPDATE. - -NOTE that storage/innobase/row/row0ins.c must contain the -prototype for this function ! */ -extern "C" -ibool -innobase_query_is_update(void) -/*==========================*/ -{ - THD* thd = current_thd; - - if (!thd) { - /* InnoDB's internal threads may run InnoDB stored procedures - that call this function. Then current_thd is not defined - (it is probably NULL). */ - - return(FALSE); - } - - switch (thd->lex->sql_command) { - case SQLCOM_REPLACE: - case SQLCOM_REPLACE_SELECT: - return(TRUE); - case SQLCOM_LOAD: - return(thd->lex->duplicates == DUP_REPLACE); - case SQLCOM_INSERT: - return(thd->lex->duplicates == DUP_UPDATE); - default: - return(FALSE); - } -} /*********************************************************************** This function is used to prepare X/Open XA distributed transaction */ - +static int innobase_xa_prepare( /*================*/ @@ -7405,10 +7595,10 @@ innobase_xa_prepare( FALSE - the current SQL statement ended */ { int error = 0; - trx_t* trx = check_trx_exists(hton, thd); + trx_t* trx = check_trx_exists(thd); - if (thd->lex->sql_command != SQLCOM_XA_PREPARE && - (all || !(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)))) + if (thd_sql_command(thd) != SQLCOM_XA_PREPARE && + (all || !thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))) { /* For ibbackup to work the order of transactions in binlog @@ -7434,12 +7624,12 @@ innobase_xa_prepare( trx->active_trans = 2; } - if (!thd->variables.innodb_support_xa) { + if (!THDVAR(thd, support_xa)) { return(0); } - trx->xid=thd->transaction.xid_state.xid; + thd_get_xid(thd, (MYSQL_XID*) &trx->xid); /* Release a possible FIFO ticket and search latch. 
Since we will reserve the kernel mutex, we have to release the search system latch @@ -7454,7 +7644,7 @@ innobase_xa_prepare( } if (all - || (!(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)))) { + || (!thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))) { /* We were instructed to prepare the whole transaction, or this is an SQL statement end and autocommit is on */ @@ -7466,12 +7656,11 @@ innobase_xa_prepare( /* We just mark the SQL statement ended and do not do a transaction prepare */ - if (trx->auto_inc_lock) { - /* If we had reserved the auto-inc lock for some - table in this SQL statement we release it now */ + /* If we had reserved the auto-inc lock for some + table in this SQL statement we release it now */ + + row_unlock_table_autoinc_for_mysql(trx); - row_unlock_table_autoinc_for_mysql(trx); - } /* Store the current undo_no of the transaction so that we know where to roll back if we have to roll back the next SQL statement */ @@ -7489,7 +7678,7 @@ innobase_xa_prepare( /*********************************************************************** This function is used to recover X/Open XA distributed transactions */ - +static int innobase_xa_recover( /*================*/ @@ -7510,7 +7699,7 @@ innobase_xa_recover( /*********************************************************************** This function is used to commit one X/Open XA distributed transaction which is in the prepared state */ - +static int innobase_commit_by_xid( /*===================*/ @@ -7534,7 +7723,7 @@ innobase_commit_by_xid( /*********************************************************************** This function is used to rollback one X/Open XA distributed transaction which is in the prepared state */ - +static int innobase_rollback_by_xid( /*=====================*/ @@ -7558,30 +7747,31 @@ Create a consistent view for a cursor based on current transaction which is created if the corresponding MySQL thread still lacks one. This consistent view is then used inside of MySQL when accessing records using a cursor. */ - +static void* innobase_create_cursor_view( +/*========================*/ /* out: pointer to cursor view or NULL */ handlerton *hton, /* in: innobase hton */ THD* thd) /* in: user thread handle */ { - return(read_cursor_view_create_for_mysql( - check_trx_exists(hton, thd))); + return(read_cursor_view_create_for_mysql(check_trx_exists(thd))); } /*********************************************************************** Close the given consistent cursor view of a transaction and restore global read view to a transaction read view. Transaction is created if the corresponding MySQL thread still lacks one. */ - +static void innobase_close_cursor_view( +/*=======================*/ handlerton *hton, THD* thd, /* in: user thread handle */ void* curview)/* in: Consistent read view to be closed */ { - read_cursor_view_close_for_mysql(check_trx_exists(hton, current_thd), - (cursor_view_t*) curview); + read_cursor_view_close_for_mysql(check_trx_exists(thd), + (cursor_view_t*) curview); } /*********************************************************************** @@ -7589,7 +7779,7 @@ Set the given consistent cursor view to a transaction which is created if the corresponding MySQL thread still lacks one. If the given consistent cursor view is NULL global read view of a transaction is restored to a transaction read view. 
*/ - +static void innobase_set_cursor_view( /*=====================*/ @@ -7597,8 +7787,8 @@ innobase_set_cursor_view( THD* thd, /* in: user thread handle */ void* curview)/* in: Consistent cursor view to be set */ { - read_cursor_set_for_mysql(check_trx_exists(hton, current_thd), - (cursor_view_t*) curview); + read_cursor_set_for_mysql(check_trx_exists(thd), + (cursor_view_t*) curview); } @@ -7636,14 +7826,242 @@ static int show_innodb_vars(THD *thd, SHOW_VAR *var, char *buff) return 0; } -SHOW_VAR innodb_status_variables_export[]= { +static SHOW_VAR innodb_status_variables_export[]= { {"Innodb", (char*) &show_innodb_vars, SHOW_FUNC}, {NullS, NullS, SHOW_LONG} }; -struct st_mysql_storage_engine innobase_storage_engine= +static struct st_mysql_storage_engine innobase_storage_engine= { MYSQL_HANDLERTON_INTERFACE_VERSION }; +/* plugin options */ +static MYSQL_SYSVAR_BOOL(checksums, innobase_use_checksums, + PLUGIN_VAR_NOCMDARG | PLUGIN_VAR_READONLY, + "Enable InnoDB checksums validation (enabled by default). " + "Disable with --skip-innodb-checksums.", + NULL, NULL, TRUE); + +static MYSQL_SYSVAR_STR(data_home_dir, innobase_data_home_dir, + PLUGIN_VAR_READONLY, + "The common part for InnoDB table spaces.", + NULL, NULL, NULL); + +static MYSQL_SYSVAR_BOOL(doublewrite, innobase_use_doublewrite, + PLUGIN_VAR_NOCMDARG | PLUGIN_VAR_READONLY, + "Enable InnoDB doublewrite buffer (enabled by default). " + "Disable with --skip-innodb-doublewrite.", + NULL, NULL, TRUE); + +static MYSQL_SYSVAR_ULONG(fast_shutdown, innobase_fast_shutdown, + PLUGIN_VAR_OPCMDARG, + "Speeds up the shutdown process of the InnoDB storage engine. Possible " + "values are 0, 1 (faster)" + /* + NetWare can't close unclosed files, can't automatically kill remaining + threads, etc, so on this OS we disable the crash-like InnoDB shutdown. 
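The MYSQL_SYSVAR_* block that starts just above replaces the old exported globals, and the trailing argument lists are easy to misread. For the numeric variants the order is: check function, update function, default, minimum, maximum, block size. A hypothetical example (no variable of this name exists in the patch):

    static ulong srv_example_pct;
    static MYSQL_SYSVAR_ULONG(example_pct, srv_example_pct,
        PLUGIN_VAR_RQCMDARG,
        "Hypothetical percentage knob, for illustration only.",
        NULL, NULL, 90, 0, 100, 0);  /* default 90, range 0..100 */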
+ */ + IF_NETWARE("", " or 2 (fastest - crash-like)") + ".", + NULL, NULL, 1, 0, IF_NETWARE(1,2), 0); + +static MYSQL_SYSVAR_BOOL(file_per_table, innobase_file_per_table, + PLUGIN_VAR_NOCMDARG | PLUGIN_VAR_READONLY, + "Stores each InnoDB table to an .ibd file in the database dir.", + NULL, NULL, FALSE); + +static MYSQL_SYSVAR_ULONG(flush_log_at_trx_commit, srv_flush_log_at_trx_commit, + PLUGIN_VAR_OPCMDARG, + "Set to 0 (write and flush once per second)," + " 1 (write and flush at each commit)" + " or 2 (write at commit, flush once per second).", + NULL, NULL, 1, 0, 2, 0); + +static MYSQL_SYSVAR_STR(flush_method, innobase_unix_file_flush_method, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "With which method to flush data.", NULL, NULL, NULL); + +static MYSQL_SYSVAR_BOOL(locks_unsafe_for_binlog, innobase_locks_unsafe_for_binlog, + PLUGIN_VAR_NOCMDARG | PLUGIN_VAR_READONLY, + "Force InnoDB to not use next-key locking, to use only row-level locking.", + NULL, NULL, FALSE); + +#ifdef UNIV_LOG_ARCHIVE +static MYSQL_SYSVAR_STR(log_arch_dir, innobase_log_arch_dir, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "Where full logs should be archived.", NULL, NULL, NULL); + +static MYSQL_SYSVAR_BOOL(log_archive, innobase_log_archive, + PLUGIN_VAR_OPCMDARG | PLUGIN_VAR_READONLY, + "Set to 1 if you want to have logs archived.", NULL, NULL, FALSE); +#endif /* UNIV_LOG_ARCHIVE */ + +static MYSQL_SYSVAR_STR(log_group_home_dir, innobase_log_group_home_dir, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "Path to InnoDB log files.", NULL, NULL, NULL); + +static MYSQL_SYSVAR_ULONG(max_dirty_pages_pct, srv_max_buf_pool_modified_pct, + PLUGIN_VAR_RQCMDARG, + "Percentage of dirty pages allowed in bufferpool.", + NULL, NULL, 90, 0, 100, 0); + +static MYSQL_SYSVAR_ULONG(max_purge_lag, srv_max_purge_lag, + PLUGIN_VAR_RQCMDARG, + "Desired maximum length of the purge queue (0 = no limit)", + NULL, NULL, 0, 0, ~0L, 0); + +static MYSQL_SYSVAR_BOOL(rollback_on_timeout, innobase_rollback_on_timeout, + PLUGIN_VAR_OPCMDARG | PLUGIN_VAR_READONLY, + "Roll back the complete transaction on lock wait timeout, for 4.x compatibility (disabled by default)", + NULL, NULL, FALSE); + +static MYSQL_SYSVAR_BOOL(status_file, innobase_create_status_file, + PLUGIN_VAR_OPCMDARG | PLUGIN_VAR_NOSYSVAR, + "Enable SHOW INNODB STATUS output in the innodb_status.<pid> file", + NULL, NULL, FALSE); + +static MYSQL_SYSVAR_BOOL(stats_on_metadata, innobase_stats_on_metadata, + PLUGIN_VAR_OPCMDARG | PLUGIN_VAR_NOSYSVAR, + "Enable statistics gathering for metadata commands such as SHOW TABLE STATUS (on by default)", + NULL, NULL, TRUE); + +static MYSQL_SYSVAR_LONG(additional_mem_pool_size, innobase_additional_mem_pool_size, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "Size of a memory pool InnoDB uses to store data dictionary information and other internal data structures.", + NULL, NULL, 1*1024*1024L, 512*1024L, ~0L, 1024); + +static MYSQL_SYSVAR_ULONG(autoextend_increment, srv_auto_extend_increment, + PLUGIN_VAR_RQCMDARG, + "Data file autoextend increment in megabytes", + NULL, NULL, 8L, 1L, 1000L, 0); + +static MYSQL_SYSVAR_LONGLONG(buffer_pool_size, innobase_buffer_pool_size, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "The size of the memory buffer InnoDB uses to cache data and indexes of its tables.", + NULL, NULL, 8*1024*1024L, 1024*1024L, LONGLONG_MAX, 1024*1024L); + +static MYSQL_SYSVAR_ULONG(commit_concurrency, srv_commit_concurrency, + PLUGIN_VAR_RQCMDARG, + "Helps in performance tuning in heavily concurrent environments.", + NULL, 
NULL, 0, 0, 1000, 0); + +static MYSQL_SYSVAR_ULONG(concurrency_tickets, srv_n_free_tickets_to_enter, + PLUGIN_VAR_RQCMDARG, + "Number of times a thread is allowed to enter InnoDB within the same SQL query after it has once got the ticket", + NULL, NULL, 500L, 1L, ~0L, 0); + +static MYSQL_SYSVAR_LONG(file_io_threads, innobase_file_io_threads, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "Number of file I/O threads in InnoDB.", + NULL, NULL, 4, 4, 64, 0); + +static MYSQL_SYSVAR_LONG(force_recovery, innobase_force_recovery, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "Helps to save your data in case the disk image of the database becomes corrupt.", + NULL, NULL, 0, 0, 6, 0); + +static MYSQL_SYSVAR_LONG(lock_wait_timeout, innobase_lock_wait_timeout, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "Timeout in seconds an InnoDB transaction may wait for a lock before being rolled back.", + NULL, NULL, 50, 1, 1024 * 1024 * 1024, 0); + +static MYSQL_SYSVAR_LONG(log_buffer_size, innobase_log_buffer_size, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "The size of the buffer which InnoDB uses to write log to the log files on disk.", + NULL, NULL, 1024*1024L, 256*1024L, ~0L, 1024); + +static MYSQL_SYSVAR_LONGLONG(log_file_size, innobase_log_file_size, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "Size of each log file in a log group.", + NULL, NULL, 5*1024*1024L, 1*1024*1024L, LONGLONG_MAX, 1024*1024L); + +static MYSQL_SYSVAR_LONG(log_files_in_group, innobase_log_files_in_group, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "Number of log files in the log group. InnoDB writes to the files in a circular fashion. Value 3 is recommended here.", + NULL, NULL, 2, 2, 100, 0); + +static MYSQL_SYSVAR_LONG(mirrored_log_groups, innobase_mirrored_log_groups, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "Number of identical copies of log groups we keep for the database. Currently this should be set to 1.", + NULL, NULL, 1, 1, 10, 0); + +static MYSQL_SYSVAR_LONG(open_files, innobase_open_files, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "How many files at the maximum InnoDB keeps open at the same time.", + NULL, NULL, 300L, 10L, ~0L, 0); + +static MYSQL_SYSVAR_ULONG(sync_spin_loops, srv_n_spin_wait_rounds, + PLUGIN_VAR_RQCMDARG, + "Count of spin-loop rounds in InnoDB mutexes", + NULL, NULL, 20L, 0L, ~0L, 0); + +static MYSQL_SYSVAR_ULONG(thread_concurrency, srv_thread_concurrency, + PLUGIN_VAR_RQCMDARG, + "Helps in performance tuning in heavily concurrent environments. Sets the maximum number of threads allowed inside InnoDB. Value 0 will disable the thread throttling.", + NULL, NULL, 8, 0, 1000, 0); + +static MYSQL_SYSVAR_ULONG(thread_sleep_delay, srv_thread_sleep_delay, + PLUGIN_VAR_RQCMDARG, + "Time of innodb thread sleeping before joining InnoDB queue (usec). 
Value 0 disable a sleep", + NULL, NULL, 10000L, 0L, ~0L, 0); + +static MYSQL_SYSVAR_STR(data_file_path, innobase_data_file_path, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "Path to individual files and their sizes.", + NULL, NULL, NULL); + +static MYSQL_SYSVAR_LONG(autoinc_lock_mode, innobase_autoinc_lock_mode, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "The AUTOINC lock modes supported by InnoDB:\n" + " 0 => Old style AUTOINC locking (for backward compatibility)\n" + " 1 => New style AUTOINC locking\n" + " 2 => No AUTOINC locking (unsafe for SBR)", + NULL, NULL, + AUTOINC_NEW_STYLE_LOCKING, /* Default setting */ + AUTOINC_OLD_STYLE_LOCKING, /* Minimum value */ + AUTOINC_NO_LOCKING, 0); /* Maximum value */ + +static struct st_mysql_sys_var* innobase_system_variables[]= { + MYSQL_SYSVAR(additional_mem_pool_size), + MYSQL_SYSVAR(autoextend_increment), + MYSQL_SYSVAR(buffer_pool_size), + MYSQL_SYSVAR(checksums), + MYSQL_SYSVAR(commit_concurrency), + MYSQL_SYSVAR(concurrency_tickets), + MYSQL_SYSVAR(data_file_path), + MYSQL_SYSVAR(data_home_dir), + MYSQL_SYSVAR(doublewrite), + MYSQL_SYSVAR(fast_shutdown), + MYSQL_SYSVAR(file_io_threads), + MYSQL_SYSVAR(file_per_table), + MYSQL_SYSVAR(flush_log_at_trx_commit), + MYSQL_SYSVAR(flush_method), + MYSQL_SYSVAR(force_recovery), + MYSQL_SYSVAR(locks_unsafe_for_binlog), + MYSQL_SYSVAR(lock_wait_timeout), +#ifdef UNIV_LOG_ARCHIVE + MYSQL_SYSVAR(log_arch_dir), + MYSQL_SYSVAR(log_archive), +#endif /* UNIV_LOG_ARCHIVE */ + MYSQL_SYSVAR(log_buffer_size), + MYSQL_SYSVAR(log_file_size), + MYSQL_SYSVAR(log_files_in_group), + MYSQL_SYSVAR(log_group_home_dir), + MYSQL_SYSVAR(max_dirty_pages_pct), + MYSQL_SYSVAR(max_purge_lag), + MYSQL_SYSVAR(mirrored_log_groups), + MYSQL_SYSVAR(open_files), + MYSQL_SYSVAR(rollback_on_timeout), + MYSQL_SYSVAR(stats_on_metadata), + MYSQL_SYSVAR(status_file), + MYSQL_SYSVAR(support_xa), + MYSQL_SYSVAR(sync_spin_loops), + MYSQL_SYSVAR(table_locks), + MYSQL_SYSVAR(thread_concurrency), + MYSQL_SYSVAR(thread_sleep_delay), + MYSQL_SYSVAR(autoinc_lock_mode), + NULL +}; + mysql_declare_plugin(innobase) { MYSQL_STORAGE_ENGINE_PLUGIN, @@ -7656,9 +8074,7 @@ mysql_declare_plugin(innobase) NULL, /* Plugin Deinit */ 0x0100 /* 1.0 */, innodb_status_variables_export,/* status variables */ - NULL, /* system variables */ - NULL /* config options */ + innobase_system_variables, /* system variables */ + NULL /* reserved */ } mysql_declare_plugin_end; - -#endif diff --git a/storage/innobase/handler/ha_innodb.h b/storage/innobase/handler/ha_innodb.h index bc19f6fbf93..fe5ebd57990 100644 --- a/storage/innobase/handler/ha_innodb.h +++ b/storage/innobase/handler/ha_innodb.h @@ -32,35 +32,32 @@ typedef struct st_innobase_share { } INNOBASE_SHARE; +struct dict_index_struct; struct row_prebuilt_struct; -my_bool innobase_query_caching_of_table_permitted(THD* thd, char* full_name, - uint full_name_len, - ulonglong *unused); +typedef struct dict_index_struct dict_index_t; +typedef struct row_prebuilt_struct row_prebuilt_t; /* The class defining a handle to an Innodb table */ class ha_innobase: public handler { - void* innobase_prebuilt;/* (row_prebuilt_t*) prebuilt - struct in InnoDB, used to save - CPU time with prebuilt data + row_prebuilt_t* prebuilt; /* prebuilt struct in InnoDB, used + to save CPU time with prebuilt data structures*/ THD* user_thd; /* the thread handle of the user currently using the handle; this is set in external_lock function */ - query_id_t last_query_id; /* the latest query id where the - handle was used */ THR_LOCK_DATA lock; 
INNOBASE_SHARE *share; - byte* upd_buff; /* buffer used in updates */ - byte* key_val_buff; /* buffer used in converting + uchar* upd_buff; /* buffer used in updates */ + uchar* key_val_buff; /* buffer used in converting search key values from MySQL format to Innodb format */ ulong upd_and_key_val_buff_len; /* the length of each of the previous two buffers */ - ulong int_table_flags; + Table_flags int_table_flags; uint primary_key; ulong start_of_scan; /* this is set to 1 when we are starting a table scan but have not @@ -71,11 +68,16 @@ class ha_innobase: public handler uint num_write_row; /* number of write_row() calls */ uint store_key_val_for_row(uint keynr, char* buff, uint buff_len, - const byte* record); + const uchar* record); int update_thd(THD* thd); int change_active_index(uint keynr); - int general_fetch(byte* buf, uint direction, uint match_mode); + int general_fetch(uchar* buf, uint direction, uint match_mode); int innobase_read_and_init_auto_inc(longlong* ret); + ulong innobase_autoinc_lock(); + ulong innobase_set_max_autoinc(ulonglong auto_inc); + ulong innobase_reset_autoinc(ulonglong auto_inc); + ulong innobase_get_auto_increment(ulonglong* value); + dict_index_t* innobase_get_index(uint keynr); /* Init values for the class: */ public: @@ -90,7 +92,7 @@ class ha_innobase: public handler const char* table_type() const { return("InnoDB");} const char *index_type(uint key_number) { return "BTREE"; } const char** bas_ext() const; - ulonglong table_flags() const { return int_table_flags; } + Table_flags table_flags() const; ulong index_flags(uint idx, uint part, bool all_parts) const { return (HA_READ_NEXT | @@ -116,32 +118,32 @@ class ha_innobase: public handler double scan_time(); double read_time(uint index, uint ranges, ha_rows rows); - int write_row(byte * buf); - int update_row(const byte * old_data, byte * new_data); - int delete_row(const byte * buf); + int write_row(uchar * buf); + int update_row(const uchar * old_data, uchar * new_data); + int delete_row(const uchar * buf); bool was_semi_consistent_read(); void try_semi_consistent_read(bool yes); void unlock_row(); int index_init(uint index, bool sorted); int index_end(); - int index_read(byte * buf, const byte * key, + int index_read(uchar * buf, const uchar * key, uint key_len, enum ha_rkey_function find_flag); - int index_read_idx(byte * buf, uint index, const byte * key, + int index_read_idx(uchar * buf, uint index, const uchar * key, uint key_len, enum ha_rkey_function find_flag); - int index_read_last(byte * buf, const byte * key, uint key_len); - int index_next(byte * buf); - int index_next_same(byte * buf, const byte *key, uint keylen); - int index_prev(byte * buf); - int index_first(byte * buf); - int index_last(byte * buf); + int index_read_last(uchar * buf, const uchar * key, uint key_len); + int index_next(uchar * buf); + int index_next_same(uchar * buf, const uchar *key, uint keylen); + int index_prev(uchar * buf); + int index_first(uchar * buf); + int index_last(uchar * buf); int rnd_init(bool scan); int rnd_end(); - int rnd_next(byte *buf); - int rnd_pos(byte * buf, byte *pos); + int rnd_next(uchar *buf); + int rnd_pos(uchar * buf, uchar *pos); - void position(const byte *record); + void position(const uchar *record); int info(uint); int analyze(THD* thd,HA_CHECK_OPT* check_opt); int optimize(THD* thd,HA_CHECK_OPT* check_opt); @@ -151,11 +153,12 @@ class ha_innobase: public handler int external_lock(THD *thd, int lock_type); int transactional_table_lock(THD *thd, int lock_type); int start_stmt(THD 
*thd, thr_lock_type lock_type); - void position(byte *record); + void position(uchar *record); ha_rows records_in_range(uint inx, key_range *min_key, key_range *max_key); ha_rows estimate_rows_upper_bound(); + void update_create_info(HA_CREATE_INFO* create_info); int create(const char *name, register TABLE *form, HA_CREATE_INFO *create_info); int delete_all_rows(); @@ -186,61 +189,67 @@ class ha_innobase: public handler my_bool register_query_cache_table(THD *thd, char *table_key, uint key_length, qc_engine_callback *call_back, - ulonglong *engine_data) - { - *call_back= innobase_query_caching_of_table_permitted; - *engine_data= 0; - return innobase_query_caching_of_table_permitted(thd, table_key, - key_length, - engine_data); - } + ulonglong *engine_data); static char *get_mysql_bin_log_name(); static ulonglong get_mysql_bin_log_pos(); bool primary_key_is_clustered() { return true; } - int cmp_ref(const byte *ref1, const byte *ref2); + int cmp_ref(const uchar *ref1, const uchar *ref2); bool check_if_incompatible_data(HA_CREATE_INFO *info, uint table_changes); }; -extern SHOW_VAR innodb_status_variables[]; -extern ulong innobase_fast_shutdown; -extern ulong innobase_large_page_size; -extern long innobase_mirrored_log_groups, innobase_log_files_in_group; -extern longlong innobase_buffer_pool_size, innobase_log_file_size; -extern long innobase_log_buffer_size; -extern long innobase_additional_mem_pool_size; -extern long innobase_buffer_pool_awe_mem_mb; -extern long innobase_file_io_threads, innobase_lock_wait_timeout; -extern long innobase_force_recovery; -extern long innobase_open_files; -extern char *innobase_data_home_dir, *innobase_data_file_path; -extern char *innobase_log_group_home_dir, *innobase_log_arch_dir; -extern char *innobase_unix_file_flush_method; -/* The following variables have to be my_bool for SHOW VARIABLES to work */ -extern my_bool innobase_log_archive, - innobase_use_doublewrite, - innobase_use_checksums, - innobase_use_large_pages, - innobase_use_native_aio, - innobase_file_per_table, innobase_locks_unsafe_for_binlog, - innobase_rollback_on_timeout, - innobase_create_status_file; +/* Some accessor functions which the InnoDB plugin needs, but which +can not be added to mysql/plugin.h as part of the public interface; +the definitions are bracketed with #ifdef INNODB_COMPATIBILITY_HOOKS */ + +#ifndef INNODB_COMPATIBILITY_HOOKS +#error InnoDB needs MySQL to be built with #define INNODB_COMPATIBILITY_HOOKS +#endif + extern "C" { -extern ulong srv_max_buf_pool_modified_pct; -extern ulong srv_max_purge_lag; -extern ulong srv_auto_extend_increment; -extern ulong srv_n_spin_wait_rounds; -extern ulong srv_n_free_tickets_to_enter; -extern ulong srv_thread_sleep_delay; -extern ulong srv_thread_concurrency; -extern ulong srv_commit_concurrency; -extern ulong srv_flush_log_at_trx_commit; -} +struct charset_info_st *thd_charset(MYSQL_THD thd); +char **thd_query(MYSQL_THD thd); + +/** Get the file name of the MySQL binlog. + * @return the name of the binlog file + */ +const char* mysql_bin_log_file_name(void); + +/** Get the current position of the MySQL binlog. 
+ * @return byte offset from the beginning of the binlog + */ +ulonglong mysql_bin_log_file_pos(void); + +/** + Check if a user thread is a replication slave thread + @param thd user thread + @retval 0 the user thread is not a replication slave thread + @retval 1 the user thread is a replication slave thread +*/ +int thd_slave_thread(const MYSQL_THD thd); -int innobase_init(void); -int innobase_end(handlerton *hton, ha_panic_function type); -bool innobase_flush_logs(handlerton *hton); -uint innobase_get_free_space(void); +/** + Check if a user thread is running a non-transactional update + @param thd user thread + @retval 0 the user thread is not running a non-transactional update + @retval 1 the user thread is running a non-transactional update +*/ +int thd_non_transactional_update(const MYSQL_THD thd); + +/** + Get the user thread's binary logging format + @param thd user thread + @return Value to be used as index into the binlog_format_names array +*/ +int thd_binlog_format(const MYSQL_THD thd); + +/** + Mark transaction to rollback and mark error as fatal to a sub-statement. + @param thd Thread handle + @param all TRUE <=> rollback main transaction. +*/ +void thd_mark_transaction_to_rollback(MYSQL_THD thd, bool all); +} /* don't delete it - it may be re-enabled later @@ -255,93 +264,3 @@ int innobase_report_binlog_offset_and_commit( int innobase_commit_complete(void* trx_handle); void innobase_store_binlog_offset_and_flush_log(char *binlog_name,longlong offset); #endif - -void innobase_drop_database(handlerton *hton, char *path); -bool innobase_show_status(handlerton *hton, THD* thd, stat_print_fn*, enum ha_stat_type); - -int innobase_release_temporary_latches(handlerton *hton, THD *thd); - -void innobase_store_binlog_offset_and_flush_log(handlerton *hton, char *binlog_name,longlong offset); - -int innobase_start_trx_and_assign_read_view(handlerton *hton, THD* thd); - -/*********************************************************************** -This function is used to prepare X/Open XA distributed transaction */ - -int innobase_xa_prepare( -/*====================*/ - /* out: 0 or error number */ - handlerton *hton, /* in: innobase hton */ - THD* thd, /* in: handle to the MySQL thread of the user - whose XA transaction should be prepared */ - bool all); /* in: TRUE - commit transaction - FALSE - the current SQL statement ended */ - -/*********************************************************************** -This function is used to recover X/Open XA distributed transactions */ - -int innobase_xa_recover( -/*====================*/ - /* out: number of prepared transactions - stored in xid_list */ - handlerton *hton, /* in: innobase hton */ - XID* xid_list, /* in/out: prepared transactions */ - uint len); /* in: number of slots in xid_list */ - -/*********************************************************************** -This function is used to commit one X/Open XA distributed transaction -which is in the prepared state */ - -int innobase_commit_by_xid( -/*=======================*/ - /* out: 0 or error number */ - handlerton *hton, /* in: innobase hton */ - XID* xid); /* in : X/Open XA Transaction Identification */ - -/*********************************************************************** -This function is used to rollback one X/Open XA distributed transaction -which is in the prepared state */ - -int innobase_rollback_by_xid( - /* out: 0 or error number */ - handlerton *hton, /* in: innobase hton */ - XID *xid); /* in : X/Open XA Transaction Identification */ - - 
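These C-linkage hooks replace the old pattern of exporting server variables directly to the engine. A minimal sketch of how engine-side code might consume them; the stub definitions stand in for the real server-side implementations, and engine_pick_deadlock_victim is a hypothetical helper, not part of this patch:

#include <stdio.h>

typedef void* MYSQL_THD;        /* opaque THD handle, as in mysql/plugin.h */

/* Stubs standing in for the server-provided hook implementations. */
static int thd_slave_thread(const MYSQL_THD thd) { (void) thd; return 0; }
static int thd_non_transactional_update(const MYSQL_THD thd) { (void) thd; return 0; }

/* Hypothetical helper: prefer as deadlock victim the thread that is
   neither a replication slave nor editing non-transactional tables,
   mirroring the weighting described in ha_prototypes.h. */
static int engine_pick_deadlock_victim(MYSQL_THD a, MYSQL_THD b)
{
	int weight_a = thd_slave_thread(a) + thd_non_transactional_update(a);
	int weight_b = thd_slave_thread(b) + thd_non_transactional_update(b);

	return (weight_a <= weight_b) ? 0 : 1;  /* index of the lighter thread */
}

int main(void)
{
	MYSQL_THD thd1 = NULL, thd2 = NULL;     /* placeholders for real THDs */
	printf("victim: thd%d\n", engine_pick_deadlock_victim(thd1, thd2) + 1);
	return 0;
}

Keeping the THD opaque behind accessor functions is what allows the same binary interface to serve both the builtin engine and the plugin build.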
-/*********************************************************************** -Create a consistent view for a cursor based on current transaction -which is created if the corresponding MySQL thread still lacks one. -This consistent view is then used inside of MySQL when accessing records -using a cursor. */ - -void* -innobase_create_cursor_view( - /* out: Pointer to cursor view or NULL */ - handlerton *hton, /* in: innobase hton */ - THD* thd); /* in: user thread handle */ - -/*********************************************************************** -Close the given consistent cursor view of a transaction and restore -global read view to a transaction read view. Transaction is created if the -corresponding MySQL thread still lacks one. */ - -void -innobase_close_cursor_view( -/*=======================*/ - handlerton *hton, /* in: innobase hton */ - THD* thd, /* in: user thread handle */ - void* curview); /* in: Consistent read view to be closed */ - - -/*********************************************************************** -Set the given consistent cursor view to a transaction which is created -if the corresponding MySQL thread still lacks one. If the given -consistent cursor view is NULL global read view of a transaction is -restored to a transaction read view. */ - -void -innobase_set_cursor_view( -/*=====================*/ - handlerton *hton, /* in: innobase hton */ - THD* thd, /* in: user thread handle */ - void* curview); /* in: Consistent read view to be set */ diff --git a/storage/innobase/ibuf/Makefile.am b/storage/innobase/ibuf/Makefile.am deleted file mode 100644 index 42adda9a4ef..00000000000 --- a/storage/innobase/ibuf/Makefile.am +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (C) 2001, 2003 MySQL AB & Innobase Oy -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -include ../include/Makefile.i - -noinst_LIBRARIES = libibuf.a - -libibuf_a_SOURCES = ibuf0ibuf.c - -EXTRA_PROGRAMS = - -# Don't update the files from bitkeeper -%::SCCS/s.% diff --git a/storage/innobase/ibuf/ibuf0ibuf.c b/storage/innobase/ibuf/ibuf0ibuf.c index 96ab928a436..44972356304 100644 --- a/storage/innobase/ibuf/ibuf0ibuf.c +++ b/storage/innobase/ibuf/ibuf0ibuf.c @@ -150,9 +150,30 @@ ulint ibuf_flush_count = 0; #define IBUF_COUNT_N_PAGES 2000 /* Buffered entry counts for file pages, used in debugging */ -static ulint* ibuf_counts[IBUF_COUNT_N_SPACES]; +static ulint ibuf_counts[IBUF_COUNT_N_SPACES][IBUF_COUNT_N_PAGES]; -static ibool ibuf_counts_inited = FALSE; +/********************************************************************** +Checks that the indexes to ibuf_counts[][] are within limits. 
*/ +UNIV_INLINE +void +ibuf_count_check( +/*=============*/ + ulint space_id, /* in: space identifier */ + ulint page_no) /* in: page number */ +{ + if (space_id < IBUF_COUNT_N_SPACES && page_no < IBUF_COUNT_N_PAGES) { + return; + } + + fprintf(stderr, + "InnoDB: UNIV_IBUF_DEBUG limits space_id and page_no\n" + "InnoDB: and breaks crash recovery.\n" + "InnoDB: space_id=%lu, should be 0<=space_id<%lu\n" + "InnoDB: page_no=%lu, should be 0<=page_no<%lu\n", + (ulint) space_id, (ulint) IBUF_COUNT_N_SPACES, + (ulint) page_no, (ulint) IBUF_COUNT_N_PAGES); + ut_error; +} #endif /* The start address for an insert buffer bitmap page bitmap */ @@ -328,15 +349,9 @@ ibuf_count_get( ulint space, /* in: space id */ ulint page_no)/* in: page number */ { - ut_ad(space < IBUF_COUNT_N_SPACES); - ut_ad(page_no < IBUF_COUNT_N_PAGES); - - if (!ibuf_counts_inited) { + ibuf_count_check(space, page_no); - return(0); - } - - return(*(ibuf_counts[space] + page_no)); + return(ibuf_counts[space][page_no]); } /********************************************************************** @@ -349,11 +364,10 @@ ibuf_count_set( ulint page_no,/* in: page number */ ulint val) /* in: value to set */ { - ut_a(space < IBUF_COUNT_N_SPACES); - ut_a(page_no < IBUF_COUNT_N_PAGES); + ibuf_count_check(space, page_no); ut_a(val < UNIV_PAGE_SIZE); - *(ibuf_counts[space] + page_no) = val; + ibuf_counts[space][page_no] = val; } #endif @@ -378,22 +392,6 @@ ibuf_init_at_db_start(void) ibuf->size = 0; -#ifdef UNIV_IBUF_DEBUG - { - ulint i, j; - - for (i = 0; i < IBUF_COUNT_N_SPACES; i++) { - - ibuf_counts[i] = mem_alloc(sizeof(ulint) - * IBUF_COUNT_N_PAGES); - for (j = 0; j < IBUF_COUNT_N_PAGES; j++) { - ibuf_count_set(i, j, 0); - } - } - - ibuf_counts_inited = TRUE; - } -#endif mutex_create(&ibuf_pessimistic_insert_mutex, SYNC_IBUF_PESS_INSERT_MUTEX); @@ -417,9 +415,7 @@ ibuf_data_sizes_update( { ulint old_size; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&ibuf_mutex)); -#endif /* UNIV_SYNC_DEBUG */ old_size = data->size; @@ -466,7 +462,8 @@ ibuf_data_init_for_space( page_t* root; page_t* header_page; mtr_t mtr; - char buf[50]; + char* buf; + mem_heap_t* heap; dict_table_t* table; dict_index_t* index; ulint n_used; @@ -520,16 +517,20 @@ ibuf_data_init_for_space( ibuf_exit(); + heap = mem_heap_create(450); + buf = mem_heap_alloc(heap, 50); + sprintf(buf, "SYS_IBUF_TABLE_%lu", (ulong) space); /* use old-style record format for the insert buffer */ table = dict_mem_table_create(buf, space, 2, 0); - dict_mem_table_add_col(table, "PAGE_NO", DATA_BINARY, 0, 0); - dict_mem_table_add_col(table, "TYPES", DATA_BINARY, 0, 0); + dict_mem_table_add_col(table, heap, "PAGE_NO", DATA_BINARY, 0, 0); + dict_mem_table_add_col(table, heap, "TYPES", DATA_BINARY, 0, 0); table->id = ut_dulint_add(DICT_IBUF_ID_MIN, space); - dict_table_add_to_cache(table); + dict_table_add_to_cache(table, heap); + mem_heap_free(heap); index = dict_mem_index_create( buf, "CLUST_IND", space, @@ -569,7 +570,8 @@ ibuf_bitmap_page_init( bit_offset = XDES_DESCRIBED_PER_PAGE * IBUF_BITS_PER_PAGE; - byte_offset = bit_offset / 8 + 1; /* better: (bit_offset + 7) / 8 */ + byte_offset = bit_offset / 8 + 1; + /* better: byte_offset = UT_BITS_IN_BYTES(bit_offset); */ fil_page_set_type(page, FIL_PAGE_IBUF_BITMAP); @@ -1142,7 +1144,7 @@ ibuf_dummy_index_add_col( ulint len) /* in: length of the column */ { ulint i = index->table->n_def; - dict_mem_table_add_col(index->table, "DUMMY", + dict_mem_table_add_col(index->table, NULL, NULL, dtype_get_mtype(type), dtype_get_prtype(type), dtype_get_len(type)); @@ 
-1164,11 +1166,6 @@ ibuf_dummy_index_free( dict_mem_table_free(table); } -void -dict_index_print_low( -/*=================*/ - dict_index_t* index); /* in: index */ - /************************************************************************* Builds the entry to insert into a non-clustered index when we have the corresponding record in an ibuf index. */ @@ -1443,6 +1440,9 @@ ibuf_entry_build( *buf2++ = 0; /* write the compact format indicator */ } for (i = 0; i < n_fields; i++) { + ulint fixed_len; + const dict_field_t* ifield; + /* We add 4 below because we have the 4 extra fields at the start of an ibuf record */ @@ -1450,10 +1450,30 @@ ibuf_entry_build( entry_field = dtuple_get_nth_field(entry, i); dfield_copy(field, entry_field); + ifield = dict_index_get_nth_field(index, i); + /* Prefix index columns of fixed-length columns are of + fixed length. However, in the function call below, + dfield_get_type(entry_field) contains the fixed length + of the column in the clustered index. Replace it with + the fixed length of the secondary index column. */ + fixed_len = ifield->fixed_len; + +#ifdef UNIV_DEBUG + if (fixed_len) { + /* dict_index_add_col() should guarantee these */ + ut_ad(fixed_len <= (ulint) entry_field->type.len); + if (ifield->prefix_len) { + ut_ad(ifield->prefix_len == fixed_len); + } else { + ut_ad(fixed_len + == (ulint) entry_field->type.len); + } + } +#endif /* UNIV_DEBUG */ + dtype_new_store_for_order_and_null_size( buf2 + i * DATA_NEW_ORDER_NULL_TYPE_BUF_SIZE, - dfield_get_type(entry_field), - dict_index_get_nth_field(index, i)->prefix_len); + dfield_get_type(entry_field), fixed_len); } /* Store the type info in buf2 to field 3 of tuple */ @@ -1576,9 +1596,7 @@ ibuf_data_enough_free_for_insert( /* out: TRUE if enough free pages in list */ ibuf_data_t* data) /* in: ibuf data for the space */ { -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&ibuf_mutex)); -#endif /* UNIV_SYNC_DEBUG */ /* We want a big margin of free pages, because a B-tree can sometimes grow in size also if records are deleted from it, as the node pointers @@ -1604,16 +1622,9 @@ ibuf_data_too_much_free( /* out: TRUE if enough free pages in list */ ibuf_data_t* data) /* in: ibuf data for the space */ { -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&ibuf_mutex)); -#endif /* UNIV_SYNC_DEBUG */ - - if (data->free_list_len >= 3 + data->size / 2 + 3 * data->height) { - return(TRUE); - } - - return(FALSE); + return(data->free_list_len >= 3 + data->size / 2 + 3 * data->height); } /************************************************************************* @@ -3451,9 +3462,7 @@ ibuf_validate_low(void) ibuf_data_t* data; ulint sum_sizes; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&ibuf_mutex)); -#endif /* UNIV_SYNC_DEBUG */ sum_sizes = 0; diff --git a/storage/innobase/include/Makefile.i b/storage/innobase/include/Makefile.i deleted file mode 100644 index db436c702ff..00000000000 --- a/storage/innobase/include/Makefile.i +++ /dev/null @@ -1,10 +0,0 @@ -# Makefile included in Makefile.am in every subdirectory - -INCLUDES = -I$(top_srcdir)/include -I$(top_builddir)/include \ - -I$(top_srcdir)/regex \ - -I$(top_srcdir)/storage/innobase/include \ - -I$(top_srcdir)/sql \ - -I$(srcdir) - -# Don't update the files from bitkeeper -%::SCCS/s.% diff --git a/storage/innobase/include/buf0buf.ic b/storage/innobase/include/buf0buf.ic index c448c28933a..b077ff0c181 100644 --- a/storage/innobase/include/buf0buf.ic +++ b/storage/innobase/include/buf0buf.ic @@ -28,7 +28,7 @@ buf_block_peek_if_too_old( buf_block_t* block) /* in: block to make younger */ { 
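/* freed_page_clock counts blocks evicted from the end of the LRU list,
and each block is stamped with the clock value that was current when the
block was last made young; the test below therefore deems a block too
old once roughly a quarter of the pool (previously 1/1024 of it) has
been evicted since that stamp, approximating LRU age without walking
the list. */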
return(buf_pool->freed_page_clock >= block->freed_page_clock - + 1 + (buf_pool->curr_size / 1024)); + + 1 + (buf_pool->curr_size / 4)); } /************************************************************************* @@ -128,9 +128,7 @@ buf_pool_clock_tic(void) /*====================*/ /* out: new clock value */ { -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(buf_pool->mutex))); -#endif /* UNIV_SYNC_DEBUG */ buf_pool->ulint_clock++; @@ -456,7 +454,7 @@ buf_frame_modify_clock_inc( #ifdef UNIV_SYNC_DEBUG ut_ad((mutex_own(&(buf_pool->mutex)) && (block->buf_fix_count == 0)) || rw_lock_own(&(block->lock), RW_LOCK_EXCLUSIVE)); -#endif /*UNIV_SYNC_DEBUG */ +#endif /* UNIV_SYNC_DEBUG */ UT_DULINT_INC(block->modify_clock); @@ -513,14 +511,12 @@ buf_block_buf_fix_inc_debug( const char* file __attribute__ ((unused)), /* in: file name */ ulint line __attribute__ ((unused))) /* in: line */ { -#ifdef UNIV_SYNC_DEBUG ibool ret; ret = rw_lock_s_lock_func_nowait(&(block->debug_latch), file, line); ut_ad(ret == TRUE); ut_ad(mutex_own(&block->mutex)); -#endif block->buf_fix_count++; } #else /* UNIV_SYNC_DEBUG */ @@ -532,9 +528,8 @@ buf_block_buf_fix_inc( /*==================*/ buf_block_t* block) /* in: block to bufferfix */ { -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&block->mutex)); -#endif + block->buf_fix_count++; } #endif /* UNIV_SYNC_DEBUG */ @@ -552,9 +547,7 @@ buf_page_hash_get( ulint fold; ut_ad(buf_pool); -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(buf_pool->mutex))); -#endif /* UNIV_SYNC_DEBUG */ /* Look for the page in the hash table */ diff --git a/storage/innobase/include/buf0flu.ic b/storage/innobase/include/buf0flu.ic index b304673f8be..ae873c42088 100644 --- a/storage/innobase/include/buf0flu.ic +++ b/storage/innobase/include/buf0flu.ic @@ -42,8 +42,8 @@ buf_flush_note_modification( ut_ad(block->buf_fix_count > 0); #ifdef UNIV_SYNC_DEBUG ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX)); - ut_ad(mutex_own(&(buf_pool->mutex))); #endif /* UNIV_SYNC_DEBUG */ + ut_ad(mutex_own(&(buf_pool->mutex))); ut_ad(ut_dulint_cmp(mtr->start_lsn, ut_dulint_zero) != 0); ut_ad(mtr->modifications); diff --git a/storage/innobase/include/db0err.h b/storage/innobase/include/db0err.h index 843c70af577..0aa1b87e470 100644 --- a/storage/innobase/include/db0err.h +++ b/storage/innobase/include/db0err.h @@ -62,6 +62,11 @@ Created 5/24/1996 Heikki Tuuri lead to a duplicate key in some table */ +#define DB_TOO_MANY_CONCURRENT_TRXS 47 /* when InnoDB runs out of the + preconfigured undo slots, this can + only happen when there are too many + concurrent transactions */ + /* The following are partial failure codes */ #define DB_FAIL 1000 #define DB_OVERFLOW 1001 diff --git a/storage/innobase/include/dict0dict.h b/storage/innobase/include/dict0dict.h index 836a6290498..2f038b21e8e 100644 --- a/storage/innobase/include/dict0dict.h +++ b/storage/innobase/include/dict0dict.h @@ -92,6 +92,17 @@ dict_col_copy_type_noninline( /*=========================*/ const dict_col_t* col, /* in: column */ dtype_t* type); /* out: data type */ +#ifdef UNIV_DEBUG +/************************************************************************* +Assert that a column and a data type match. */ +UNIV_INLINE +ibool +dict_col_type_assert_equal( +/*=======================*/ + /* out: TRUE */ + const dict_col_t* col, /* in: column */ + const dtype_t* type); /* in: data type */ +#endif /* UNIV_DEBUG */ /*************************************************************************** Returns the minimum size of the column. 
*/ UNIV_INLINE @@ -160,6 +171,13 @@ dict_col_name_is_reserved( /* out: TRUE if name is reserved */ const char* name); /* in: column name */ /************************************************************************ +Acquire the autoinc lock.*/ + +void +dict_table_autoinc_lock( +/*====================*/ + dict_table_t* table); /* in: table */ +/************************************************************************ Initializes the autoinc counter. It is not an error to initialize an already initialized counter. */ @@ -169,22 +187,6 @@ dict_table_autoinc_initialize( dict_table_t* table, /* in: table */ ib_longlong value); /* in: next value to assign to a row */ /************************************************************************ -Gets the next autoinc value (== autoinc counter value), 0 if not yet -initialized. If initialized, increments the counter by 1. */ - -ib_longlong -dict_table_autoinc_get( -/*===================*/ - /* out: value for a new row, or 0 */ - dict_table_t* table); /* in: table */ -/************************************************************************ -Decrements the autoinc counter value by 1. */ - -void -dict_table_autoinc_decrement( -/*=========================*/ - dict_table_t* table); /* in: table */ -/************************************************************************ Reads the next autoinc value (== autoinc counter value), 0 if not yet initialized. */ @@ -194,15 +196,6 @@ dict_table_autoinc_read( /* out: value for a new row, or 0 */ dict_table_t* table); /* in: table */ /************************************************************************ -Peeks the autoinc counter value, 0 if not yet initialized. Does not -increment the counter. The read not protected by any mutex! */ - -ib_longlong -dict_table_autoinc_peek( -/*====================*/ - /* out: value of the counter */ - dict_table_t* table); /* in: table */ -/************************************************************************ Updates the autoinc counter if the value supplied is equal or bigger than the current value. If not inited, does nothing. */ @@ -212,13 +205,29 @@ dict_table_autoinc_update( dict_table_t* table, /* in: table */ ib_longlong value); /* in: value which was assigned to a row */ +/************************************************************************ +Release the autoinc lock.*/ + +void +dict_table_autoinc_unlock( +/*======================*/ + dict_table_t* table); /* in: table */ +/************************************************************************** +Adds system columns to a table object. */ + +void +dict_table_add_system_columns( +/*==========================*/ + dict_table_t* table, /* in/out: table */ + mem_heap_t* heap); /* in: temporary heap */ /************************************************************************** Adds a table object to the dictionary cache. */ void dict_table_add_to_cache( /*====================*/ - dict_table_t* table); /* in: table */ + dict_table_t* table, /* in: table */ + mem_heap_t* heap); /* in: temporary heap */ /************************************************************************** Removes a table object from the dictionary cache. 
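The removed get/decrement/peek helpers give way to an explicit lock/read/update/unlock protocol around the counter. A hedged sketch of the resulting call pattern, assuming pthreads; the struct layout, function bodies and reserve_next_autoinc are simplified stand-ins, not the real dict0dict.c code:

#include <pthread.h>

typedef long long ib_longlong;

typedef struct {
	pthread_mutex_t autoinc_mutex;  /* stands in for the table's autoinc latch */
	ib_longlong     autoinc;        /* next value to assign to a row */
} dict_table_t;

static void dict_table_autoinc_lock(dict_table_t* table)
{ pthread_mutex_lock(&table->autoinc_mutex); }

static void dict_table_autoinc_unlock(dict_table_t* table)
{ pthread_mutex_unlock(&table->autoinc_mutex); }

static ib_longlong dict_table_autoinc_read(dict_table_t* table)
{ return table->autoinc; }

/* Updates only if the supplied value is equal or bigger, per the
   header comment above. */
static void dict_table_autoinc_update(dict_table_t* table, ib_longlong value)
{ if (value >= table->autoinc) table->autoinc = value; }

/* Caller side: reserve the next value while holding the lock, so that
   concurrent inserters cannot interleave between read and update. */
static ib_longlong reserve_next_autoinc(dict_table_t* table)
{
	ib_longlong value;

	dict_table_autoinc_lock(table);
	value = dict_table_autoinc_read(table);
	dict_table_autoinc_update(table, value + 1);
	dict_table_autoinc_unlock(table);

	return value;
}

int main(void)
{
	dict_table_t t = { PTHREAD_MUTEX_INITIALIZER, 1 };
	ib_longlong v = reserve_next_autoinc(&t);

	return (v == 1 && t.autoinc == 2) ? 0 : 1;
}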
*/ diff --git a/storage/innobase/include/dict0dict.ic b/storage/innobase/include/dict0dict.ic index d59e99277da..7d38cbcd1fa 100644 --- a/storage/innobase/include/dict0dict.ic +++ b/storage/innobase/include/dict0dict.ic @@ -30,6 +30,30 @@ dict_col_copy_type( type->mbmaxlen = col->mbmaxlen; } +#ifdef UNIV_DEBUG +/************************************************************************* +Assert that a column and a data type match. */ +UNIV_INLINE +ibool +dict_col_type_assert_equal( +/*=======================*/ + /* out: TRUE */ + const dict_col_t* col, /* in: column */ + const dtype_t* type) /* in: data type */ +{ + ut_ad(col); + ut_ad(type); + + ut_ad(col->mtype == type->mtype); + ut_ad(col->prtype == type->prtype); + ut_ad(col->len == type->len); + ut_ad(col->mbminlen == type->mbminlen); + ut_ad(col->mbmaxlen == type->mbmaxlen); + + return(TRUE); +} +#endif /* UNIV_DEBUG */ + /*************************************************************************** Returns the minimum size of the column. */ UNIV_INLINE @@ -551,9 +575,7 @@ dict_table_check_if_in_cache_low( ulint table_fold; ut_ad(table_name); -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(dict_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ /* Look for the table name in the hash table */ table_fold = ut_fold_string(table_name); @@ -576,9 +598,7 @@ dict_table_get_low( dict_table_t* table; ut_ad(table_name); -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(dict_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ table = dict_table_check_if_in_cache_low(table_name); @@ -601,9 +621,7 @@ dict_table_get_on_id_low( dict_table_t* table; ulint fold; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(dict_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ /* Look for the table name in the hash table */ fold = ut_fold_dulint(table_id); diff --git a/storage/innobase/include/dict0mem.h b/storage/innobase/include/dict0mem.h index a23f89954a4..a05bc513efd 100644 --- a/storage/innobase/include/dict0mem.h +++ b/storage/innobase/include/dict0mem.h @@ -72,7 +72,8 @@ void dict_mem_table_add_col( /*===================*/ dict_table_t* table, /* in: table */ - const char* name, /* in: column name */ + mem_heap_t* heap, /* in: temporary memory heap, or NULL */ + const char* name, /* in: column name, or NULL */ ulint mtype, /* in: main datatype */ ulint prtype, /* in: precise type */ ulint len); /* in: precision */ @@ -158,10 +159,13 @@ struct dict_col_struct{ of an index */ }; -/* DICT_MAX_INDEX_COL_LEN is measured in bytes and is the max index column -length + 1. Starting from 4.1.6, we set it to < 3 * 256, so that one can -create a column prefix index on 255 characters of a TEXT field also in the -UTF-8 charset. In that charset, a character may take at most 3 bytes. */ +/* DICT_MAX_INDEX_COL_LEN is measured in bytes and is the maximum +indexed column length (or indexed prefix length). It is set to 3*256, +so that one can create a column prefix index on 256 characters of a +TEXT or VARCHAR column also in the UTF-8 charset. In that charset, +a character may take at most 3 bytes. +This constant MUST NOT BE CHANGED, or the compatibility of InnoDB data +files would be at risk! */ #define DICT_MAX_INDEX_COL_LEN 768 @@ -311,11 +315,11 @@ struct dict_table_struct{ unsigned n_cols:10;/* number of columns */ dict_col_t* cols; /* array of column descriptions */ const char* col_names; - /* n_def column names packed in an - "name1\0name2\0...nameN\0" array. until - n_def reaches n_cols, this is allocated with - ut_malloc, and the final size array is - allocated through the table's heap. 
*/ + /* Column names packed in a character string + "name1\0name2\0...nameN\0". Until + the string contains n_cols, it will be + allocated from a temporary heap. The final + string will be allocated from table->heap. */ hash_node_t name_hash; /* hash chain node */ hash_node_t id_hash; /* hash chain node */ UT_LIST_BASE_NODE_T(dict_index_t) @@ -407,6 +411,21 @@ struct dict_table_struct{ SELECT MAX(auto inc column) */ ib_longlong autoinc;/* autoinc counter value to give to the next inserted row */ + + ib_longlong autoinc_increment; + /* The increment step of the auto increment + column. Value must be greater than or equal + to 1 */ + ulong n_waiting_or_granted_auto_inc_locks; + /* This counter is used to track the number + of granted and pending autoinc locks on this + table. This value is set after acquiring the + kernel mutex but we peek the contents to + determine whether other transactions have + acquired the AUTOINC lock or not. Of course + only one transaction can be granted the + lock but there can be multiple waiters. */ + #ifdef UNIV_DEBUG ulint magic_n;/* magic number */ # define DICT_TABLE_MAGIC_N 76333786 diff --git a/storage/innobase/include/fsp0fsp.h b/storage/innobase/include/fsp0fsp.h index d04269fc157..82e95a2e920 100644 --- a/storage/innobase/include/fsp0fsp.h +++ b/storage/innobase/include/fsp0fsp.h @@ -245,7 +245,7 @@ will be able to insert new data to the database without running out the tablespace. Only free extents are taken into account and we also subtract the safety margin required by the above function fsp_reserve_free_extents. */ -ulint +ullint fsp_get_available_space_in_free_extents( /*====================================*/ /* out: available space in kB */ diff --git a/storage/innobase/include/ha0ha.ic b/storage/innobase/include/ha0ha.ic index 1584e1ff4bf..fb264377f28 100644 --- a/storage/innobase/include/ha0ha.ic +++ b/storage/innobase/include/ha0ha.ic @@ -81,9 +81,7 @@ ha_search( { ha_node_t* node; -#ifdef UNIV_SYNC_DEBUG ut_ad(!table->mutexes || mutex_own(hash_get_mutex(table, fold))); -#endif /* UNIV_SYNC_DEBUG */ node = ha_chain_get_first(table, fold); @@ -113,9 +111,7 @@ ha_search_and_get_data( { ha_node_t* node; -#ifdef UNIV_SYNC_DEBUG ut_ad(!table->mutexes || mutex_own(hash_get_mutex(table, fold))); -#endif /* UNIV_SYNC_DEBUG */ node = ha_chain_get_first(table, fold); @@ -145,9 +141,7 @@ ha_search_with_data( { ha_node_t* node; -#ifdef UNIV_SYNC_DEBUG ut_ad(!table->mutexes || mutex_own(hash_get_mutex(table, fold))); -#endif /* UNIV_SYNC_DEBUG */ node = ha_chain_get_first(table, fold); @@ -177,9 +171,7 @@ ha_search_and_delete_if_found( { ha_node_t* node; -#ifdef UNIV_SYNC_DEBUG ut_ad(!table->mutexes || mutex_own(hash_get_mutex(table, fold))); -#endif /* UNIV_SYNC_DEBUG */ node = ha_search_with_data(table, fold, data); diff --git a/storage/innobase/include/ha_prototypes.h b/storage/innobase/include/ha_prototypes.h index 2d27034fdfe..7fb50988941 100644 --- a/storage/innobase/include/ha_prototypes.h +++ b/storage/innobase/include/ha_prototypes.h @@ -1,6 +1,9 @@ #ifndef HA_INNODB_PROTOTYPES_H #define HA_INNODB_PROTOTYPES_H +#include "univ.i" /* ulint, uint */ +#include "m_ctype.h" /* CHARSET_INFO */ + /* Prototypes for global functions in ha_innodb.cc that are called by InnoDB's C-code. */ @@ -19,4 +22,30 @@ innobase_convert_string( CHARSET_INFO* from_cs, uint* errors); +/********************************************************************** +Returns true if the thread is the replication thread on the slave +server. 
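The handle is passed as void* because this header is shared with InnoDB's pure C code, which cannot see the THD class.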
Used in srv_conc_enter_innodb() to determine if the thread +should be allowed to enter InnoDB - the replication thread is treated +differently than other threads. Also used in +srv_conc_force_exit_innodb(). */ + +ibool +thd_is_replication_slave_thread( +/*============================*/ + /* out: true if thd is the replication thread */ + void* thd); /* in: thread handle (THD*) */ + +/********************************************************************** +Returns true if the transaction this thread is processing has edited +non-transactional tables. Used by the deadlock detector when deciding +which transaction to rollback in case of a deadlock - we try to avoid +rolling back transactions that have edited non-transactional tables. */ + +ibool +thd_has_edited_nontrans_tables( +/*===========================*/ + /* out: true if non-transactional tables have + been edited */ + void* thd); /* in: thread handle (THD*) */ + #endif diff --git a/storage/innobase/include/lock0iter.h b/storage/innobase/include/lock0iter.h new file mode 100644 index 00000000000..d063a360c1f --- /dev/null +++ b/storage/innobase/include/lock0iter.h @@ -0,0 +1,52 @@ +/****************************************************** +Lock queue iterator type and function prototypes. + +(c) 2007 Innobase Oy + +Created July 16, 2007 Vasil Dimov +*******************************************************/ + +#ifndef lock0iter_h +#define lock0iter_h + +#include "univ.i" +#include "lock0types.h" + +typedef struct lock_queue_iterator_struct { + lock_t* current_lock; + /* In case this is a record lock queue (not table lock queue) + then bit_no is the record number within the heap in which the + record is stored. */ + ulint bit_no; +} lock_queue_iterator_t; + +/*********************************************************************** +Initialize lock queue iterator so that it starts to iterate from +"lock". bit_no specifies the record number within the heap where the +record is stored. It can be undefined (ULINT_UNDEFINED) in two cases: +1. If the lock is a table lock, thus we have a table lock queue; +2. If the lock is a record lock and it is a wait lock. In this case + bit_no is calculated in this function by using + lock_rec_find_set_bit(). There is exactly one bit set in the bitmap + of a wait lock. */ + +void +lock_queue_iterator_reset( +/*======================*/ + lock_queue_iterator_t* iter, /* out: iterator */ + lock_t* lock, /* in: lock to start from */ + ulint bit_no);/* in: record number in the + heap */ + +/*********************************************************************** +Gets the previous lock in the lock queue, returns NULL if there are no +more locks (i.e. the current lock is the first one). The iterator is +receded (if not-NULL is returned). */ + +lock_t* +lock_queue_iterator_get_prev( +/*=========================*/ + /* out: previous lock or NULL */ + lock_queue_iterator_t* iter); /* in/out: iterator */ + +#endif /* lock0iter_h */ diff --git a/storage/innobase/include/lock0lock.h b/storage/innobase/include/lock0lock.h index 6b863e32183..8b08b6284f6 100644 --- a/storage/innobase/include/lock0lock.h +++ b/storage/innobase/include/lock0lock.h @@ -519,6 +519,18 @@ lock_is_table_exclusive( dict_table_t* table, /* in: table */ trx_t* trx); /* in: transaction */ /************************************************************************* +Checks if a lock request lock1 has to wait for request lock2. 
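Both the basic lock modes and the LOCK_GAP, LOCK_REC_NOT_GAP and LOCK_INSERT_INTENTION flags factor into the answer.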
*/ + +ibool +lock_has_to_wait( +/*=============*/ + /* out: TRUE if lock1 has to wait for lock2 to be + removed */ + lock_t* lock1, /* in: waiting lock */ + lock_t* lock2); /* in: another lock; NOTE that it is assumed that this + has a lock bit set on the same record as in lock1 if + the locks are record locks */ +/************************************************************************* Checks that a transaction id is sensible, i.e., not in the future. */ ibool @@ -597,7 +609,7 @@ lock_validate(void); /* out: TRUE if ok */ /************************************************************************* Return approximate number or record locks (bits set in the bitmap) for -this transaction. Since delete-marked records ma ybe removed, the +this transaction. Since delete-marked records may be removed, the record count will not be precise. */ ulint diff --git a/storage/innobase/include/lock0lock.ic b/storage/innobase/include/lock0lock.ic index feec460bec8..311623b190b 100644 --- a/storage/innobase/include/lock0lock.ic +++ b/storage/innobase/include/lock0lock.ic @@ -65,9 +65,7 @@ lock_clust_rec_some_has_impl( { dulint trx_id; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ ut_ad(index->type & DICT_CLUSTERED); ut_ad(page_rec_is_user_rec(rec)); diff --git a/storage/innobase/include/lock0priv.h b/storage/innobase/include/lock0priv.h new file mode 100644 index 00000000000..7703a2b7def --- /dev/null +++ b/storage/innobase/include/lock0priv.h @@ -0,0 +1,101 @@ +/****************************************************** +Lock module internal structures and methods. + +(c) 2007 Innobase Oy + +Created July 12, 2007 Vasil Dimov +*******************************************************/ + +#ifndef lock0priv_h +#define lock0priv_h + +#ifndef LOCK_MODULE_IMPLEMENTATION +/* If you need to access members of the structures defined in this +file, please write appropriate functions that retrieve them and put +those functions in lock/ */ +#error Do not include lock0priv.h outside of the lock/ module +#endif + +#include "univ.i" +#include "dict0types.h" +#include "hash0hash.h" +#include "trx0types.h" +#include "ut0lst.h" + +/* A table lock */ +typedef struct lock_table_struct lock_table_t; +struct lock_table_struct { + dict_table_t* table; /* database table in dictionary + cache */ + UT_LIST_NODE_T(lock_t) + locks; /* list of locks on the same + table */ +}; + +/* Record lock for a page */ +typedef struct lock_rec_struct lock_rec_t; +struct lock_rec_struct { + ulint space; /* space id */ + ulint page_no; /* page number */ + ulint n_bits; /* number of bits in the lock + bitmap; NOTE: the lock bitmap is + placed immediately after the + lock struct */ +}; + +/* Lock struct */ +struct lock_struct { + trx_t* trx; /* transaction owning the + lock */ + UT_LIST_NODE_T(lock_t) + trx_locks; /* list of the locks of the + transaction */ + ulint type_mode; /* lock type, mode, LOCK_GAP or + LOCK_REC_NOT_GAP, + LOCK_INSERT_INTENTION, + wait flag, ORed */ + hash_node_t hash; /* hash chain node for a record + lock */ + dict_index_t* index; /* index for a record lock */ + union { + lock_table_t tab_lock;/* table lock */ + lock_rec_t rec_lock;/* record lock */ + } un_member; +}; + +/************************************************************************* +Gets the type of a lock. 
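Only the type bits of type_mode are returned; the mode, gap and wait flags ORed into the same word are masked off.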
*/ +UNIV_INLINE +ulint +lock_get_type( +/*==========*/ + /* out: LOCK_TABLE or LOCK_REC */ + const lock_t* lock); /* in: lock */ + +/************************************************************************** +Looks for a set bit in a record lock bitmap. Returns ULINT_UNDEFINED, +if none found. */ + +ulint +lock_rec_find_set_bit( +/*==================*/ + /* out: bit index == heap number of the record, or + ULINT_UNDEFINED if none found */ + lock_t* lock); /* in: record lock with at least one bit set */ + +/************************************************************************* +Gets the previous record lock set on a record. */ + +lock_t* +lock_rec_get_prev( +/*==============*/ + /* out: previous lock on the same record, NULL if + none exists */ + lock_t* in_lock,/* in: record lock */ + ulint heap_no);/* in: heap number of the record */ + +#ifndef UNIV_NONINL +#include "lock0priv.ic" +#endif + +#endif /* lock0priv_h */ diff --git a/storage/innobase/include/lock0priv.ic b/storage/innobase/include/lock0priv.ic new file mode 100644 index 00000000000..4bc8397509d --- /dev/null +++ b/storage/innobase/include/lock0priv.ic @@ -0,0 +1,32 @@ +/****************************************************** +Lock module internal inline methods. + +(c) 2007 Innobase Oy + +Created July 16, 2007 Vasil Dimov +*******************************************************/ + +/* This file contains only methods which are used in +lock/lock0* files, other than lock/lock0lock.c. +I.e. lock/lock0lock.c contains more internal inline +methods but they are used only in that file. */ + +#ifndef LOCK_MODULE_IMPLEMENTATION +#error Do not include lock0priv.ic outside of the lock/ module +#endif + +/************************************************************************* +Gets the type of a lock. */ +UNIV_INLINE +ulint +lock_get_type( +/*==========*/ + /* out: LOCK_TABLE or LOCK_REC */ + const lock_t* lock) /* in: lock */ +{ + ut_ad(lock); + + return(lock->type_mode & LOCK_TYPE_MASK); +} + +/* vim: set filetype=c: */ diff --git a/storage/innobase/include/log0log.ic b/storage/innobase/include/log0log.ic index 06deff196bc..df0a8baf2d5 100644 --- a/storage/innobase/include/log0log.ic +++ b/storage/innobase/include/log0log.ic @@ -255,9 +255,7 @@ log_block_init( { ulint no; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(log_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ no = log_block_convert_lsn_to_no(lsn); @@ -279,9 +277,7 @@ log_block_init_in_old_format( { ulint no; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(log_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ no = log_block_convert_lsn_to_no(lsn); diff --git a/storage/innobase/include/mem0mem.h b/storage/innobase/include/mem0mem.h index f68d45d83df..2d5fd1db6c3 100644 --- a/storage/innobase/include/mem0mem.h +++ b/storage/innobase/include/mem0mem.h @@ -280,17 +280,6 @@ mem_strdupl( ulint len); /* in: length of str, in bytes */ /************************************************************************** -Makes a NUL-terminated quoted copy of a NUL-terminated string. */ -UNIV_INLINE -char* -mem_strdupq( -/*========*/ - /* out, own: a quoted copy of the string, - must be deallocated with mem_free */ - const char* str, /* in: string to be copied */ - char q); /* in: quote character */ - -/************************************************************************** Duplicates a NUL-terminated string, allocated from a memory heap. 
*/ char* diff --git a/storage/innobase/include/mem0mem.ic b/storage/innobase/include/mem0mem.ic index 069f8de36cb..adae9ad8a33 100644 --- a/storage/innobase/include/mem0mem.ic +++ b/storage/innobase/include/mem0mem.ic @@ -167,6 +167,8 @@ mem_heap_alloc( mem_block_set_free(block, free + MEM_SPACE_NEEDED(n)); #ifdef UNIV_MEM_DEBUG + UNIV_MEM_ALLOC(buf, + n + MEM_FIELD_HEADER_SIZE + MEM_FIELD_TRAILER_SIZE); /* In the debug version write debugging info to the field */ mem_field_init((byte*)buf, n); @@ -177,8 +179,10 @@ mem_heap_alloc( #endif #ifdef UNIV_SET_MEM_TO_ZERO + UNIV_MEM_ALLOC(buf, n); memset(buf, '\0', n); #endif + UNIV_MEM_ALLOC(buf, n); return(buf); } @@ -267,15 +271,19 @@ mem_heap_free_heap_top( ut_ad(mem_block_get_start(block) <= mem_block_get_free(block)); /* In the debug version erase block from top up */ - - mem_erase_buf(old_top, (byte*)block + block->len - old_top); + { + ulint len = (byte*)block + block->len - old_top; + mem_erase_buf(old_top, len); + UNIV_MEM_FREE(old_top, len); + } /* Update allocated memory count */ mutex_enter(&mem_hash_mutex); mem_current_allocated_memory -= (total_size - size); mutex_exit(&mem_hash_mutex); - -#endif +#else /* UNIV_MEM_DEBUG */ + UNIV_MEM_FREE(old_top, (byte*)block + block->len - old_top); +#endif /* UNIV_MEM_DEBUG */ /* If free == start, we may free the block if it is not the first one */ @@ -369,6 +377,8 @@ mem_heap_free_top( if ((heap != block) && (mem_block_get_free(block) == mem_block_get_start(block))) { mem_heap_block_free(heap, block); + } else { + UNIV_MEM_FREE((byte*) block + mem_block_get_free(block), n); } } @@ -590,41 +600,6 @@ mem_strdupl( } /************************************************************************** -Makes a NUL-terminated quoted copy of a NUL-terminated string. */ -UNIV_INLINE -char* -mem_strdupq( -/*========*/ - /* out, own: a quoted copy of the string, - must be deallocated with mem_free */ - const char* str, /* in: string to be copied */ - char q) /* in: quote character */ -{ - char* dst; - char* d; - const char* s = str; - size_t len = strlen(str) + 3; - /* calculate the number of quote characters in the string */ - while((s = strchr(s, q)) != NULL) { - s++; - len++; - } - /* allocate the quoted string, and copy it */ - d = dst = mem_alloc(len); - *d++ = q; - s = str; - while(*s) { - if ((*d++ = *s++) == q) { - *d++ = q; - } - } - *d++ = q; - *d++ = '\0'; - ut_ad((ssize_t) len == d - dst); - return(dst); -} - -/************************************************************************** Makes a NUL-terminated copy of a nonterminated string, allocated from a memory heap. */ UNIV_INLINE diff --git a/storage/innobase/include/os0file.h b/storage/innobase/include/os0file.h index 5ffcdf7e58c..9eb44d3f4a8 100644 --- a/storage/innobase/include/os0file.h +++ b/storage/innobase/include/os0file.h @@ -94,7 +94,8 @@ log. 
*/ #define OS_FILE_PATH_ERROR 74 #define OS_FILE_AIO_RESOURCES_RESERVED 75 /* wait for OS aio resources to become available again */ -#define OS_FILE_ERROR_NOT_SPECIFIED 76 +#define OS_FILE_SHARING_VIOLATION 76 +#define OS_FILE_ERROR_NOT_SPECIFIED 77 /* Types for aio operations */ #define OS_FILE_READ 10 diff --git a/storage/innobase/include/page0page.h b/storage/innobase/include/page0page.h index 833d268c9de..273007c2778 100644 --- a/storage/innobase/include/page0page.h +++ b/storage/innobase/include/page0page.h @@ -531,6 +531,15 @@ page_get_free_space_of_empty( /* out: free space */ ulint comp) /* in: nonzero=compact page format */ __attribute__((const)); +/***************************************************************** +Calculates free space if a page is emptied. */ + +ulint +page_get_free_space_of_empty_noninline( +/*===================================*/ + /* out: free space */ + ulint comp) /* in: nonzero=compact page format */ + __attribute__((const)); /**************************************************************** Returns the sum of the sizes of the records in the record list excluding the infimum and supremum records. */ diff --git a/storage/innobase/include/rem0rec.ic b/storage/innobase/include/rem0rec.ic index ace90247b80..95aa65fabba 100644 --- a/storage/innobase/include/rem0rec.ic +++ b/storage/innobase/include/rem0rec.ic @@ -795,7 +795,8 @@ UNIV_INLINE void rec_offs_set_n_alloc( /*=================*/ - ulint* offsets, /* in: array for rec_get_offsets() */ + ulint* offsets, /* out: array for rec_get_offsets(), + must be allocated */ ulint n_alloc) /* in: number of elements */ { ut_ad(offsets); @@ -995,6 +996,9 @@ rec_offs_nth_size( { ut_ad(rec_offs_validate(NULL, NULL, offsets)); ut_ad(n < rec_offs_n_fields(offsets)); + if (!n) { + return(rec_offs_base(offsets)[1 + n] & REC_OFFS_MASK); + } return((rec_offs_base(offsets)[1 + n] - rec_offs_base(offsets)[n]) & REC_OFFS_MASK); } @@ -1279,7 +1283,8 @@ UNIV_INLINE void rec_offs_set_n_fields( /*==================*/ - ulint* offsets, /* in: array returned by rec_get_offsets() */ + ulint* offsets, /* in/out: array returned by + rec_get_offsets() */ ulint n_fields) /* in: number of fields */ { ut_ad(offsets); diff --git a/storage/innobase/include/row0mysql.h b/storage/innobase/include/row0mysql.h index 1448efe94fe..aabb7f5f047 100644 --- a/storage/innobase/include/row0mysql.h +++ b/storage/innobase/include/row0mysql.h @@ -670,6 +670,7 @@ struct row_prebuilt_struct { to this heap */ mem_heap_t* old_vers_heap; /* memory heap where a previous version is built in consistent read */ + ulonglong last_value; /* last value of AUTO-INC interval */ ulint magic_n2; /* this should be the same as magic_n */ }; diff --git a/storage/innobase/include/row0sel.h b/storage/innobase/include/row0sel.h index 96273a18cd5..4bde648f18e 100644 --- a/storage/innobase/include/row0sel.h +++ b/storage/innobase/include/row0sel.h @@ -171,7 +171,17 @@ row_search_check_if_query_cache_permitted( trx_t* trx, /* in: transaction object */ const char* norm_name); /* in: concatenation of database name, '/' char, table name */ +/*********************************************************************** +Read the max AUTOINC value from an index. 
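This is the index-scan counterpart of the SELECT MAX(auto inc column) initialization mentioned in dict0mem.h, filling the in-memory counter when it is first needed.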
*/ +ulint +row_search_max_autoinc( +/*===================*/ + /* out: DB_SUCCESS if all OK else + error code */ + dict_index_t* index, /* in: index to search */ + const char* col_name, /* in: autoinc column name */ + ib_longlong* value); /* out: AUTOINC value read */ /* A structure for caching column values for prefetched rows */ struct sel_buf_struct{ diff --git a/storage/innobase/include/srv0srv.h b/storage/innobase/include/srv0srv.h index 9b617841f4c..1ad695f700c 100644 --- a/storage/innobase/include/srv0srv.h +++ b/storage/innobase/include/srv0srv.h @@ -124,6 +124,8 @@ extern ulint srv_fast_shutdown; /* If this is 1, do not do a transactions). */ extern ibool srv_innodb_status; +extern ibool srv_stats_on_metadata; + extern ibool srv_use_doublewrite_buf; extern ibool srv_use_checksums; diff --git a/storage/innobase/include/sync0rw.ic b/storage/innobase/include/sync0rw.ic index defe0692aa8..b41593d0a96 100644 --- a/storage/innobase/include/sync0rw.ic +++ b/storage/innobase/include/sync0rw.ic @@ -133,9 +133,8 @@ rw_lock_s_lock_low( const char* file_name, /* in: file name where lock requested */ ulint line) /* in: line where requested */ { -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(rw_lock_get_mutex(lock))); -#endif /* UNIV_SYNC_DEBUG */ + /* Check if the writer field is free */ if (UNIV_LIKELY(lock->writer == RW_LOCK_NOT_LOCKED)) { @@ -232,7 +231,7 @@ rw_lock_s_lock_func( owns an s-lock here, it may end up in a deadlock with another thread which requests an x-lock here. Therefore, we will forbid recursive s-locking of a latch: the following assert will warn the programmer - of the possibility of a tjis kind of deadlock. If we want to implement + of the possibility of this kind of a deadlock. If we want to implement safe recursive s-locking, we should keep in a list the thread ids of the threads which have s-locked a latch. This would use some CPU time. */ diff --git a/storage/innobase/include/sync0sync.h b/storage/innobase/include/sync0sync.h index e7e135c0c7e..69c0cd9e39b 100644 --- a/storage/innobase/include/sync0sync.h +++ b/storage/innobase/include/sync0sync.h @@ -114,13 +114,20 @@ mutex_enter_func( mutex_t* mutex, /* in: pointer to mutex */ const char* file_name, /* in: file name where locked */ ulint line); /* in: line where locked */ +/****************************************************************** +NOTE! The following macro should be used in mutex locking, not the +corresponding function. */ + +#define mutex_enter_nowait(M) \ + mutex_enter_nowait_func((M), __FILE__, __LINE__) /************************************************************************ -Tries to lock the mutex for the current thread. If the lock is not acquired -immediately, returns with return value 1. */ +NOTE! Use the corresponding macro in the header file, not this function +directly. Tries to lock the mutex for the current thread. If the lock is not +acquired immediately, returns with return value 1. */ ulint -mutex_enter_nowait( -/*===============*/ +mutex_enter_nowait_func( +/*====================*/ /* out: 0 if succeed, 1 if not */ mutex_t* mutex, /* in: pointer to mutex */ const char* file_name, /* in: file name where mutex @@ -170,7 +177,16 @@ Checks that the mutex has been initialized. */ ibool mutex_validate( /*===========*/ - mutex_t* mutex); + const mutex_t* mutex); +/********************************************************************** +Checks that the current thread owns the mutex. Works only +in the debug version. 
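The usual call site is an assertion of the form ut_ad(mutex_own(mutex)), which compiles away entirely when UNIV_DEBUG is not defined.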
*/ + +ibool +mutex_own( +/*======*/ + /* out: TRUE if owns */ + const mutex_t* mutex); /* in: mutex */ #endif /* UNIV_DEBUG */ #ifdef UNIV_SYNC_DEBUG /********************************************************************** @@ -215,15 +231,6 @@ sync_thread_levels_empty_gen( also purge_is_running mutex is allowed */ /********************************************************************** -Checks that the current thread owns the mutex. Works only -in the debug version. */ - -ibool -mutex_own( -/*======*/ - /* out: TRUE if owns */ - mutex_t* mutex); /* in: mutex */ -/********************************************************************** Gets the debug information for a reserved mutex. */ void @@ -248,7 +255,7 @@ UNIV_INLINE ulint mutex_get_lock_word( /*================*/ - mutex_t* mutex); /* in: mutex */ + const mutex_t* mutex); /* in: mutex */ #ifdef UNIV_SYNC_DEBUG /********************************************************************** NOT to be used outside this module except in debugging! Gets the waiters @@ -258,7 +265,7 @@ ulint mutex_get_waiters( /*==============*/ /* out: value to set */ - mutex_t* mutex); /* in: mutex */ + const mutex_t* mutex); /* in: mutex */ #endif /* UNIV_SYNC_DEBUG */ /* @@ -479,13 +486,13 @@ struct mutex_struct { #ifdef UNIV_SYNC_DEBUG const char* file_name; /* File where the mutex was locked */ ulint line; /* Line where the mutex was locked */ - os_thread_id_t thread_id; /* Debug version: The thread id of the - thread which locked the mutex. */ ulint level; /* Level in the global latching order */ #endif /* UNIV_SYNC_DEBUG */ const char* cfile_name;/* File name where mutex created */ ulint cline; /* Line where created */ #ifdef UNIV_DEBUG + os_thread_id_t thread_id; /* The thread id of the thread + which locked the mutex. */ ulint magic_n; # define MUTEX_MAGIC_N (ulint)979585 #endif /* UNIV_DEBUG */ diff --git a/storage/innobase/include/sync0sync.ic b/storage/innobase/include/sync0sync.ic index 4b48a1469ff..9bd5ac2a518 100644 --- a/storage/innobase/include/sync0sync.ic +++ b/storage/innobase/include/sync0sync.ic @@ -6,6 +6,16 @@ Mutex, the basic synchronization primitive Created 9/5/1995 Heikki Tuuri *******************************************************/ +#if defined(not_defined) && defined(__GNUC__) && defined(UNIV_INTEL_X86) +/* %z0: Use the size of operand %0 which in our case is *m to determine +instruction size, it should end up as xchgl. "1" in the input constraint, +says that "in" has to go in the same place as "out".*/ +#define TAS(m, in, out) \ + asm volatile ("xchg%z0 %2, %0" \ + : "=g" (*(m)), "=r" (out) \ + : "1" (in)) /* Note: "1" here refers to "=r" (out) */ +#endif + /********************************************************************** Sets the waiters field in a mutex. */ @@ -89,20 +99,10 @@ mutex_test_and_set( return(res); #elif defined(not_defined) && defined(__GNUC__) && defined(UNIV_INTEL_X86) - ulint* lw; ulint res; - lw = &(mutex->lock_word); - - /* In assembly we use the so-called AT & T syntax where - the order of operands is inverted compared to the ordinary Intel - syntax. The 'l' after the mnemonics denotes a 32-bit operation. - The line after the code tells which values come out of the asm - code, and the second line tells the input to the asm code. 
*/ + TAS(&mutex->lock_word, 1, res); - asm volatile("movl $1, %%eax; xchgl (%%ecx), %%eax" : - "=eax" (res), "=m" (*lw) : - "ecx" (lw)); return(res); #else ibool ret; @@ -141,20 +141,9 @@ mutex_reset_lock_word( __asm MOV ECX, lw __asm XCHG EDX, DWORD PTR [ECX] #elif defined(not_defined) && defined(__GNUC__) && defined(UNIV_INTEL_X86) - ulint* lw; - - lw = &(mutex->lock_word); - - /* In assembly we use the so-called AT & T syntax where - the order of operands is inverted compared to the ordinary Intel - syntax. The 'l' after the mnemonics denotes a 32-bit operation. */ + ulint res; - asm volatile("movl $0, %%eax; xchgl (%%ecx), %%eax" : - "=m" (*lw) : - "ecx" (lw) : - "eax"); /* gcc does not seem to understand - that our asm code resets eax: tell it - explicitly that after the third ':' */ + TAS(&mutex->lock_word, 0, res); #else mutex->lock_word = 0; @@ -168,9 +157,9 @@ UNIV_INLINE ulint mutex_get_lock_word( /*================*/ - mutex_t* mutex) /* in: mutex */ + const mutex_t* mutex) /* in: mutex */ { - volatile ulint* ptr; /* declared volatile to ensure that + const volatile ulint* ptr; /* declared volatile to ensure that lock_word is loaded from memory */ ut_ad(mutex); @@ -186,9 +175,9 @@ ulint mutex_get_waiters( /*==============*/ /* out: value to set */ - mutex_t* mutex) /* in: mutex */ + const mutex_t* mutex) /* in: mutex */ { - volatile ulint* ptr; /* declared volatile to ensure that + const volatile ulint* ptr; /* declared volatile to ensure that the value is read from memory */ ut_ad(mutex); @@ -206,11 +195,11 @@ mutex_exit( /*=======*/ mutex_t* mutex) /* in: pointer to mutex */ { -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(mutex)); - mutex->thread_id = ULINT_UNDEFINED; + ut_d(mutex->thread_id = ULINT_UNDEFINED); +#ifdef UNIV_SYNC_DEBUG sync_thread_reset_level(mutex); #endif mutex_reset_lock_word(mutex); @@ -250,6 +239,7 @@ mutex_enter_func( ulint line) /* in: line where locked */ { ut_ad(mutex_validate(mutex)); + ut_ad(!mutex_own(mutex)); /* Note that we do not peek at the value of lock_word before trying the atomic test_and_set; we could peek, and possibly save time. 
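Peeking first (the test-and-test-and-set idiom) would spare the bus-locking xchg on a contended mutex, at the price of one extra plain load when the mutex is free.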
*/ @@ -259,6 +249,7 @@ mutex_enter_func( #endif /* UNIV_DEBUG && !UNIV_HOTBACKUP */ if (!mutex_test_and_set(mutex)) { + ut_d(mutex->thread_id = os_thread_get_curr_id()); #ifdef UNIV_SYNC_DEBUG mutex_set_debug_info(mutex, file_name, line); #endif diff --git a/storage/innobase/include/trx0sys.ic b/storage/innobase/include/trx0sys.ic index 9c950be09f0..86b71df08d6 100644 --- a/storage/innobase/include/trx0sys.ic +++ b/storage/innobase/include/trx0sys.ic @@ -62,9 +62,7 @@ trx_sys_get_nth_rseg( trx_sys_t* sys, /* in: trx system */ ulint n) /* in: index of slot */ { -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(kernel_mutex))); -#endif /* UNIV_SYNC_DEBUG */ ut_ad(n < TRX_SYS_N_RSEGS); return(sys->rseg_array[n]); @@ -121,9 +119,7 @@ trx_sysf_rseg_get_space( ulint i, /* in: slot index == rseg id */ mtr_t* mtr) /* in: mtr */ { -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(kernel_mutex))); -#endif /* UNIV_SYNC_DEBUG */ ut_ad(sys_header); ut_ad(i < TRX_SYS_N_RSEGS); @@ -146,9 +142,7 @@ trx_sysf_rseg_get_page_no( mtr_t* mtr) /* in: mtr */ { ut_ad(sys_header); -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(kernel_mutex))); -#endif /* UNIV_SYNC_DEBUG */ ut_ad(i < TRX_SYS_N_RSEGS); return(mtr_read_ulint(sys_header + TRX_SYS_RSEGS @@ -168,9 +162,7 @@ trx_sysf_rseg_set_space( ulint space, /* in: space id */ mtr_t* mtr) /* in: mtr */ { -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(kernel_mutex))); -#endif /* UNIV_SYNC_DEBUG */ ut_ad(sys_header); ut_ad(i < TRX_SYS_N_RSEGS); @@ -194,9 +186,7 @@ trx_sysf_rseg_set_page_no( slot is reset to unused */ mtr_t* mtr) /* in: mtr */ { -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(kernel_mutex))); -#endif /* UNIV_SYNC_DEBUG */ ut_ad(sys_header); ut_ad(i < TRX_SYS_N_RSEGS); @@ -250,9 +240,7 @@ trx_get_on_id( { trx_t* trx; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(kernel_mutex))); -#endif /* UNIV_SYNC_DEBUG */ trx = UT_LIST_GET_FIRST(trx_sys->trx_list); @@ -282,9 +270,7 @@ trx_list_get_min_trx_id(void) { trx_t* trx; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(kernel_mutex))); -#endif /* UNIV_SYNC_DEBUG */ trx = UT_LIST_GET_LAST(trx_sys->trx_list); @@ -307,9 +293,7 @@ trx_is_active( { trx_t* trx; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(kernel_mutex))); -#endif /* UNIV_SYNC_DEBUG */ if (ut_dulint_cmp(trx_id, trx_list_get_min_trx_id()) < 0) { @@ -346,9 +330,7 @@ trx_sys_get_new_trx_id(void) { dulint id; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ /* VERY important: after the database is started, max_trx_id value is divisible by TRX_SYS_TRX_ID_WRITE_MARGIN, and the following if @@ -378,9 +360,7 @@ trx_sys_get_new_trx_no(void) /*========================*/ /* out: new, allocated trx number */ { -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ return(trx_sys_get_new_trx_id()); } diff --git a/storage/innobase/include/trx0trx.h b/storage/innobase/include/trx0trx.h index 8232699c7f9..5017c15aaf0 100644 --- a/storage/innobase/include/trx0trx.h +++ b/storage/innobase/include/trx0trx.h @@ -371,12 +371,22 @@ trx_is_interrupted( #define trx_is_interrupted(trx) FALSE #endif /* !UNIV_HOTBACKUP */ +/*********************************************************************** +Compares the "weight" (or size) of two transactions. The weight of one +transaction is estimated as the number of altered rows + the number of +locked rows. Transactions that have edited non-transactional tables are +considered heavier than ones that have not. 
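The altered-row count is cheap to obtain because undo record numbers form a gapless ascending sequence per transaction; see the undo_no comment below.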
*/ + +int +trx_weight_cmp( +/*===========*/ + /* out: <0, 0 or >0; similar to strcmp(3) */ + trx_t* a, /* in: the first transaction to be compared */ + trx_t* b); /* in: the second transaction to be compared */ /* Signal to a transaction */ struct trx_sig_struct{ ulint type; /* signal type */ - ulint state; /* TRX_SIG_WAITING or - TRX_SIG_BEING_HANDLED */ ulint sender; /* TRX_SIG_SELF or TRX_SIG_OTHER_SESS */ que_thr_t* receiver; /* non-NULL if the sender of the signal @@ -404,7 +414,7 @@ struct trx_struct{ const char* op_info; /* English text describing the current operation, or an empty string */ - ulint type; /* TRX_USER, TRX_PURGE */ + unsigned is_purge:1; /* 0=user transaction, 1=purge */ ulint conc_state; /* state of the trx from the point of view of concurrency control: TRX_ACTIVE, TRX_COMMITTED_IN_MEMORY, @@ -455,7 +465,8 @@ struct trx_struct{ dulint table_id; /* table id if the preceding field is TRUE */ /*------------------------------*/ - int active_trans; /* 1 - if a transaction in MySQL + unsigned duplicates:2; /* TRX_DUP_IGNORE | TRX_DUP_REPLACE */ + unsigned active_trans:2; /* 1 - if a transaction in MySQL is active. 2 - if prepare_commit_mutex was taken */ void* mysql_thd; /* MySQL thread handle corresponding @@ -471,31 +482,6 @@ struct trx_struct{ ib_longlong mysql_log_offset;/* if MySQL binlog is used, this field contains the end offset of the binlog entry */ - const char* mysql_master_log_file_name; - /* if the database server is a MySQL - replication slave, we have here the - master binlog name up to which - replication has processed; otherwise - this is a pointer to a null - character */ - ib_longlong mysql_master_log_pos; - /* if the database server is a MySQL - replication slave, this is the - position in the log file up to which - replication has processed */ - /* A MySQL variable mysql_thd->synchronous_repl tells if we have - to use synchronous replication. See ha_innodb.cc. */ - char* repl_wait_binlog_name;/* NULL, or if synchronous MySQL - replication is used, the binlog name - up to which we must communicate the - binlog to the slave, before returning - from a commit; this is the same as - mysql_log_file_name, but we allocate - and copy the name to a separate buffer - here */ - ib_longlong repl_wait_binlog_pos;/* see above at - repl_wait_binlog_name */ - os_thread_id_t mysql_thread_id;/* id of the MySQL thread associated with this transaction object */ ulint mysql_process_no;/* since in Linux, 'top' reports @@ -604,7 +590,7 @@ struct trx_struct{ NULL */ ibool was_chosen_as_deadlock_victim; /* when the transaction decides to wait - for a lock, this it sets this to FALSE; + for a lock, it sets this to FALSE; if another transaction chooses this transaction as a victim in deadlock resolution, it sets this to TRUE */ @@ -645,7 +631,12 @@ struct trx_struct{ cannot be any activity in the undo logs! */ dulint undo_no; /* next undo log record number to - assign */ + assign; since the undo log is + private for a transaction, this + is a simple ascending sequence + with no gaps; thus it represents + the number of modified/inserted + rows in a transaction */ trx_savept_t last_sql_stat_start; /* undo_no when the last sql statement was started: in case of an error, trx @@ -665,6 +656,9 @@ struct trx_struct{ trx_undo_arr_t* undo_no_arr; /* array of undo numbers of undo log records which are currently processed by a rollback operation */ + ulint n_autoinc_rows; /* no. of AUTO-INC rows required for + an SQL statement. 
This is useful for + multi-row INSERTs */ /*------------------------------*/ char detailed_error[256]; /* detailed error message for last error, or empty. */ @@ -675,25 +669,19 @@ struct trx_struct{ single operation of a transaction, e.g., a parallel query */ -/* Transaction types */ -#define TRX_USER 1 /* normal user transaction */ -#define TRX_PURGE 2 /* purge transaction: this is not - inserted to the trx list of trx_sys - and no rollback segment is assigned to - this */ -/* Transaction concurrency states */ +/* Transaction concurrency states (trx->conc_state) */ #define TRX_NOT_STARTED 1 #define TRX_ACTIVE 2 #define TRX_COMMITTED_IN_MEMORY 3 #define TRX_PREPARED 4 /* Support for 2PC/XA */ -/* Transaction execution states when trx state is TRX_ACTIVE */ +/* Transaction execution states when trx->conc_state == TRX_ACTIVE */ #define TRX_QUE_RUNNING 1 /* transaction is running */ #define TRX_QUE_LOCK_WAIT 2 /* transaction is waiting for a lock */ #define TRX_QUE_ROLLING_BACK 3 /* transaction is rolling back */ #define TRX_QUE_COMMITTING 4 /* transaction is committing */ -/* Transaction isolation levels */ +/* Transaction isolation levels (trx->isolation_level) */ #define TRX_ISO_READ_UNCOMMITTED 1 /* dirty read: non-locking SELECTs are performed so that we do not look at a possible @@ -728,6 +716,12 @@ struct trx_struct{ converted to LOCK IN SHARE MODE reads */ +/* Treatment of duplicate values (trx->duplicates; for example, in inserts). +Multiple flags can be combined with bitwise OR. */ +#define TRX_DUP_IGNORE 1 /* duplicate rows are to be updated */ +#define TRX_DUP_REPLACE 2 /* duplicate rows are to be replaced */ + + /* Types of a trx signal */ #define TRX_SIG_NO_SIGNAL 100 #define TRX_SIG_TOTAL_ROLLBACK 1 @@ -742,9 +736,6 @@ struct trx_struct{ session */ #define TRX_SIG_OTHER_SESS 2 /* sent by another session (which must hold rights to this) */ -/* Signal states */ -#define TRX_SIG_WAITING 1 -#define TRX_SIG_BEING_HANDLED 2 /* Commit command node in a query graph */ struct commit_node_struct{ diff --git a/storage/innobase/include/trx0undo.h b/storage/innobase/include/trx0undo.h index 87849ab42c3..f53c6b01be4 100644 --- a/storage/innobase/include/trx0undo.h +++ b/storage/innobase/include/trx0undo.h @@ -222,13 +222,16 @@ trx_undo_lists_init( Assigns an undo log for a transaction. A new undo log is created or a cached undo log reused. */ -trx_undo_t* +ulint trx_undo_assign_undo( /*=================*/ - /* out: the undo log, NULL if did not succeed: out of - space */ - trx_t* trx, /* in: transaction */ - ulint type); /* in: TRX_UNDO_INSERT or TRX_UNDO_UPDATE */ + /* out: DB_SUCCESS if undo log assign + * successful, possible error codes are: + * ER_TOO_MANY_CONCURRENT_TRXS + * DB_OUT_OF_FILE_SPAC + * DB_OUT_OF_MEMORY */ + trx_t* trx, /* in: transaction */ + ulint type); /* in: TRX_UNDO_INSERT or TRX_UNDO_UPDATE */ /********************************************************************** Sets the state of the undo log segment at a transaction finish. */ diff --git a/storage/innobase/include/univ.i b/storage/innobase/include/univ.i index 7a5cb21f07a..ba8e6e56219 100644 --- a/storage/innobase/include/univ.i +++ b/storage/innobase/include/univ.i @@ -40,9 +40,9 @@ if we are compiling on Windows. */ # undef VERSION /* Include the header file generated by GNU autoconf */ -#ifndef __WIN__ -# include "config.h" -#endif +# ifndef __WIN__ +# include "config.h" +# endif # ifdef HAVE_SCHED_H # include <sched.h> @@ -51,9 +51,9 @@ if we are compiling on Windows. 
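The trx_undo_assign_undo() hunk above changes its contract from returning a trx_undo_t* (NULL on failure) to returning an error code. A hedged caller-side sketch of the new pattern follows; it assumes the usual InnoDB-internal headers, DB_SUCCESS and the error codes are those listed in the comment above (where "DB_OUT_OF_FILE_SPAC" is presumably DB_OUT_OF_FILE_SPACE from db0err.h), and the wrapper name is hypothetical.

	static ulint
	prepare_update_undo_sketch(trx_t* trx)
	{
		ulint	err = trx_undo_assign_undo(trx, TRX_UNDO_UPDATE);

		if (err != DB_SUCCESS) {
			/* too many concurrent trxs, out of file space,
			or out of memory: propagate the code instead of
			testing a NULL pointer as callers did before
			this change */
			return(err);
		}

		return(DB_SUCCESS);
	}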
*/ /* When compiling for Itanium IA64, undefine the flag below to prevent use of the 32-bit x86 assembler in mutex operations. */ -#if defined(__WIN__) && !defined(WIN64) && !defined(_WIN64) -#define UNIV_CAN_USE_X86_ASSEMBLER -#endif +# if defined(__WIN__) && !defined(WIN64) && !defined(_WIN64) +# define UNIV_CAN_USE_X86_ASSEMBLER +# endif /* We only try to do explicit inlining of functions with gcc and Microsoft Visual C++ */ @@ -83,6 +83,8 @@ memory is read outside the allocated blocks. */ /* Make a non-inline debug version */ #if 0 +#define UNIV_DEBUG_VALGRIND /* Enable extra + Valgrind instrumentation */ #define UNIV_DEBUG /* Enable ut_ad() assertions */ #define UNIV_LIST_DEBUG /* debug UT_LIST_ macros */ #define UNIV_MEM_DEBUG /* detect memory leaks etc */ @@ -214,6 +216,8 @@ typedef __int64 ib_longlong; typedef longlong ib_longlong; #endif +typedef unsigned long long int ullint; + #ifndef __WIN__ #if SIZEOF_LONG != SIZEOF_VOIDP #error "Error: InnoDB's ulint must be of the same size as void*" @@ -298,5 +302,17 @@ typedef void* os_thread_ret_t; #include "ut0dbg.h" #include "ut0ut.h" #include "db0err.h" +#ifdef UNIV_DEBUG_VALGRIND +# include <valgrind/memcheck.h> +# define UNIV_MEM_VALID(addr, size) VALGRIND_MAKE_MEM_DEFINED(addr, size) +# define UNIV_MEM_INVALID(addr, size) VALGRIND_MAKE_MEM_UNDEFINED(addr, size) +# define UNIV_MEM_FREE(addr, size) VALGRIND_MAKE_MEM_NOACCESS(addr, size) +# define UNIV_MEM_ALLOC(addr, size) VALGRIND_MAKE_MEM_UNDEFINED(addr, size) +#else +# define UNIV_MEM_VALID(addr, size) do {} while(0) +# define UNIV_MEM_INVALID(addr, size) do {} while(0) +# define UNIV_MEM_FREE(addr, size) do {} while(0) +# define UNIV_MEM_ALLOC(addr, size) do {} while(0) +#endif #endif diff --git a/storage/innobase/include/ut0byte.ic b/storage/innobase/include/ut0byte.ic index 020cf9cedd9..01b6c29d08f 100644 --- a/storage/innobase/include/ut0byte.ic +++ b/storage/innobase/include/ut0byte.ic @@ -390,8 +390,8 @@ ut_bit_set_nth( # error "TRUE != 1" #endif if (val) { - return((1 << n) | a); + return(((ulint) 1 << n) | a); } else { - return(~(1 << n) & a); + return(~((ulint) 1 << n) & a); } } diff --git a/storage/innobase/include/ut0lst.h b/storage/innobase/include/ut0lst.h index 9735bf315c6..ebe2803fe23 100644 --- a/storage/innobase/include/ut0lst.h +++ b/storage/innobase/include/ut0lst.h @@ -74,6 +74,7 @@ the pointer to the node to be added to the list. NAME is the list name. */ ((N)->NAME).next = (BASE).start;\ ((N)->NAME).prev = NULL;\ if ((BASE).start != NULL) {\ + ut_ad((BASE).start != (N));\ (((BASE).start)->NAME).prev = (N);\ }\ (BASE).start = (N);\ @@ -94,6 +95,7 @@ the pointer to the node to be added to the list. NAME is the list name. */ ((N)->NAME).prev = (BASE).end;\ ((N)->NAME).next = NULL;\ if ((BASE).end != NULL) {\ + ut_ad((BASE).end != (N));\ (((BASE).end)->NAME).next = (N);\ }\ (BASE).end = (N);\ @@ -111,6 +113,7 @@ name, NODE1 and NODE2 are pointers to nodes. */ {\ ut_ad(NODE1);\ ut_ad(NODE2);\ + ut_ad((NODE1) != (NODE2));\ ((BASE).count)++;\ ((NODE2)->NAME).prev = (NODE1);\ ((NODE2)->NAME).next = ((NODE1)->NAME).next;\ diff --git a/storage/innobase/include/ut0mem.h b/storage/innobase/include/ut0mem.h index 90c16f4fad5..e56895bc142 100644 --- a/storage/innobase/include/ut0mem.h +++ b/storage/innobase/include/ut0mem.h @@ -63,7 +63,7 @@ ut_test_malloc( /* out: TRUE if succeeded */ ulint n); /* in: try to allocate this many bytes */ /************************************************************************** -Frees a memory bloock allocated with ut_malloc. 
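The UNIV_MEM_* wrappers added to univ.i above expand to Valgrind client requests only under UNIV_DEBUG_VALGRIND and to no-ops otherwise. A sketch of how an allocator might use them; plain malloc/free stand in for InnoDB's memory pool, which recycles blocks rather than returning them to the OS, which is exactly why the explicit FREE/ALLOC marking matters there.

	#include <stdlib.h>

	void*
	pool_alloc_sketch(size_t n)
	{
		void*	p = malloc(n);

		if (p != NULL) {
			/* mark as allocated but undefined: Valgrind
			will flag any read before the first write */
			UNIV_MEM_ALLOC(p, n);
		}

		return(p);
	}

	void
	pool_free_sketch(void* p, size_t n)
	{
		/* mark as unaddressable: a later access is reported
		even if the block stays mapped inside a pool */
		UNIV_MEM_FREE(p, n);
		free(p);
	}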
*/ +Frees a memory block allocated with ut_malloc. */ void ut_free( diff --git a/storage/innobase/include/ut0ut.h b/storage/innobase/include/ut0ut.h index b4e9fa91491..825c10d5f11 100644 --- a/storage/innobase/include/ut0ut.h +++ b/storage/innobase/include/ut0ut.h @@ -119,7 +119,13 @@ ulint ut_2_power_up( /*==========*/ /* out: first power of 2 which is >= n */ - ulint n); /* in: number != 0 */ + ulint n) /* in: number != 0 */ + __attribute__((const)); + +/* Determine how many bytes (groups of 8 bits) are needed to +store the given number of bits. */ +#define UT_BITS_IN_BYTES(b) (((b) + 7) / 8) + /**************************************************************** Sort function for ulint arrays. */ diff --git a/storage/innobase/include/ut0ut.ic b/storage/innobase/include/ut0ut.ic index 7b080216117..412717a094e 100644 --- a/storage/innobase/include/ut0ut.ic +++ b/storage/innobase/include/ut0ut.ic @@ -170,5 +170,5 @@ ut_2_exp( /* out: 2 to power n */ ulint n) /* in: number */ { - return(1 << n); + return((ulint) 1 << n); } diff --git a/storage/innobase/lock/Makefile.am b/storage/innobase/lock/Makefile.am deleted file mode 100644 index 4c6caa49853..00000000000 --- a/storage/innobase/lock/Makefile.am +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (C) 2001, 2003 MySQL AB & Innobase Oy -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -include ../include/Makefile.i - -noinst_LIBRARIES = liblock.a - -liblock_a_SOURCES = lock0lock.c - -EXTRA_PROGRAMS = - -# Don't update the files from bitkeeper -%::SCCS/s.% diff --git a/storage/innobase/lock/lock0iter.c b/storage/innobase/lock/lock0iter.c new file mode 100644 index 00000000000..0afa7019c86 --- /dev/null +++ b/storage/innobase/lock/lock0iter.c @@ -0,0 +1,90 @@ +/****************************************************** +Lock queue iterator. Can iterate over table and record +lock queues. + +(c) 2007 Innobase Oy + +Created July 16, 2007 Vasil Dimov +*******************************************************/ + +#define LOCK_MODULE_IMPLEMENTATION + +#include "univ.i" +#include "lock0iter.h" +#include "lock0lock.h" +#include "lock0priv.h" +#include "ut0dbg.h" +#include "ut0lst.h" + +/*********************************************************************** +Initialize lock queue iterator so that it starts to iterate from +"lock". bit_no specifies the record number within the heap where the +record is stored. It can be undefined (ULINT_UNDEFINED) in two cases: +1. If the lock is a table lock, thus we have a table lock queue; +2. If the lock is a record lock and it is a wait lock. In this case + bit_no is calculated in this function by using + lock_rec_find_set_bit(). There is exactly one bit set in the bitmap + of a wait lock. 
*/ + +void +lock_queue_iterator_reset( +/*======================*/ + lock_queue_iterator_t* iter, /* out: iterator */ + lock_t* lock, /* in: lock to start from */ + ulint bit_no) /* in: record number in the + heap */ +{ + iter->current_lock = lock; + + if (bit_no != ULINT_UNDEFINED) { + + iter->bit_no = bit_no; + } else { + + switch (lock_get_type(lock)) { + case LOCK_TABLE: + iter->bit_no = ULINT_UNDEFINED; + break; + case LOCK_REC: + iter->bit_no = lock_rec_find_set_bit(lock); + ut_a(iter->bit_no != ULINT_UNDEFINED); + break; + default: + ut_error; + } + } +} + +/*********************************************************************** +Gets the previous lock in the lock queue, returns NULL if there are no +more locks (i.e. the current lock is the first one). The iterator is +receded (if not-NULL is returned). */ + +lock_t* +lock_queue_iterator_get_prev( +/*=========================*/ + /* out: previous lock or NULL */ + lock_queue_iterator_t* iter) /* in/out: iterator */ +{ + lock_t* prev_lock; + + switch (lock_get_type(iter->current_lock)) { + case LOCK_REC: + prev_lock = lock_rec_get_prev( + iter->current_lock, iter->bit_no); + break; + case LOCK_TABLE: + prev_lock = UT_LIST_GET_PREV( + un_member.tab_lock.locks, iter->current_lock); + break; + default: + ut_error; + } + + if (prev_lock != NULL) { + + iter->current_lock = prev_lock; + } + + return(prev_lock); +} diff --git a/storage/innobase/lock/lock0lock.c b/storage/innobase/lock/lock0lock.c index 84b64b146dc..39cbf83e58e 100644 --- a/storage/innobase/lock/lock0lock.c +++ b/storage/innobase/lock/lock0lock.c @@ -6,10 +6,14 @@ The transaction lock system Created 5/7/1996 Heikki Tuuri *******************************************************/ +#define LOCK_MODULE_IMPLEMENTATION + #include "lock0lock.h" +#include "lock0priv.h" #ifdef UNIV_NONINL #include "lock0lock.ic" +#include "lock0priv.ic" #endif #include "usr0sess.h" @@ -319,42 +323,6 @@ ibool lock_print_waits = FALSE; /* The lock system */ lock_sys_t* lock_sys = NULL; -/* A table lock */ -typedef struct lock_table_struct lock_table_t; -struct lock_table_struct{ - dict_table_t* table; /* database table in dictionary cache */ - UT_LIST_NODE_T(lock_t) - locks; /* list of locks on the same table */ -}; - -/* Record lock for a page */ -typedef struct lock_rec_struct lock_rec_t; -struct lock_rec_struct{ - ulint space; /* space id */ - ulint page_no; /* page number */ - ulint n_bits; /* number of bits in the lock bitmap */ - /* NOTE: the lock bitmap is placed immediately - after the lock struct */ -}; - -/* Lock struct */ -struct lock_struct{ - trx_t* trx; /* transaction owning the lock */ - UT_LIST_NODE_T(lock_t) - trx_locks; /* list of the locks of the - transaction */ - ulint type_mode; /* lock type, mode, LOCK_GAP or - LOCK_REC_NOT_GAP, - LOCK_INSERT_INTENTION, - wait flag, ORed */ - hash_node_t hash; /* hash chain node for a record lock */ - dict_index_t* index; /* index for a record lock */ - union { - lock_table_t tab_lock;/* table lock */ - lock_rec_t rec_lock;/* record lock */ - } un_member; -}; - /* We store info on the latest deadlock error to this buffer. InnoDB Monitor will then fetch it and print */ ibool lock_deadlock_found = FALSE; @@ -401,20 +369,6 @@ lock_deadlock_recursive( return LOCK_VICTIM_IS_START */ /************************************************************************* -Gets the type of a lock. 
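A usage sketch for the iterator defined above, assuming the InnoDB-internal headers and that the kernel mutex is held, since lock queues are otherwise unstable. Per the comment on lock_queue_iterator_reset(), passing ULINT_UNDEFINED is valid here only for a table lock or a waiting record lock; the function name is hypothetical.

	static void
	scan_queue_before_sketch(
		lock_t*	lock)	/* in: table lock, or waiting rec lock */
	{
		lock_queue_iterator_t	iter;
		lock_t*			prev;

		lock_queue_iterator_reset(&iter, lock, ULINT_UNDEFINED);

		for (prev = lock_queue_iterator_get_prev(&iter);
		     prev != NULL;
		     prev = lock_queue_iterator_get_prev(&iter)) {

			/* prev is an earlier request in the same
			queue; one could e.g. test
			lock_has_to_wait(lock, prev) here */
		}
	}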
*/ -UNIV_INLINE -ulint -lock_get_type( -/*==========*/ - /* out: LOCK_TABLE or LOCK_REC */ - lock_t* lock) /* in: lock */ -{ - ut_ad(lock); - - return(lock->type_mode & LOCK_TYPE_MASK); -} - -/************************************************************************* Gets the nth bit of a record lock. */ UNIV_INLINE ibool @@ -611,8 +565,8 @@ UNIV_INLINE ulint lock_get_mode( /*==========*/ - /* out: mode */ - lock_t* lock) /* in: lock */ + /* out: mode */ + const lock_t* lock) /* in: lock */ { ut_ad(lock); @@ -1017,7 +971,7 @@ lock_rec_has_to_wait( /************************************************************************* Checks if a lock request lock1 has to wait for request lock2. */ -static + ibool lock_has_to_wait( /*=============*/ @@ -1098,7 +1052,7 @@ lock_rec_set_nth_bit( /************************************************************************** Looks for a set bit in a record lock bitmap. Returns ULINT_UNDEFINED, if none found. */ -static + ulint lock_rec_find_set_bit( /*==================*/ @@ -1162,9 +1116,7 @@ lock_rec_get_next_on_page( ulint space; ulint page_no; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ ut_ad(lock_get_type(lock) == LOCK_REC); space = lock->un_member.rec_lock.space; @@ -1201,9 +1153,7 @@ lock_rec_get_first_on_page_addr( { lock_t* lock; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ lock = HASH_GET_FIRST(lock_sys->rec_hash, lock_rec_hash(space, page_no)); @@ -1261,9 +1211,7 @@ lock_rec_get_first_on_page( ulint space; ulint page_no; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ hash = buf_frame_get_lock_hash_val(ptr); @@ -1295,9 +1243,7 @@ lock_rec_get_next( rec_t* rec, /* in: record on a page */ lock_t* lock) /* in: lock */ { -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ ut_ad(lock_get_type(lock) == LOCK_REC); if (page_rec_is_comp(rec)) { @@ -1326,9 +1272,7 @@ lock_rec_get_first( { lock_t* lock; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ lock = lock_rec_get_first_on_page(rec); if (UNIV_LIKELY_NULL(lock)) { @@ -1400,7 +1344,7 @@ lock_rec_copy( /************************************************************************* Gets the previous record lock set on a record. 
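The recurring edit in the hunks above and below drops the #ifdef UNIV_SYNC_DEBUG guard around ut_ad(mutex_own(...)). That is safe because ut_ad() itself already vanishes from non-debug builds, roughly as sketched here (simplified from ut0dbg.h; this assumes mutex_own() is compiled in whenever UNIV_DEBUG is defined):

	#ifdef UNIV_DEBUG
	# define ut_ad(EXPR)	ut_a(EXPR)	/* checked in debug builds */
	# define ut_d(EXPR)	EXPR		/* debug-only statement */
	#else
	# define ut_ad(EXPR)			/* compiled out entirely */
	# define ut_d(EXPR)
	#endif

	/* hence the unguarded form costs nothing in release builds: */
	/* ut_ad(mutex_own(&kernel_mutex)); */

The same ut_d() macro is what lets the earlier sync0sync.ic hunk record mutex->thread_id only in debug builds.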
*/ -static + lock_t* lock_rec_get_prev( /*==============*/ @@ -1414,9 +1358,7 @@ lock_rec_get_prev( ulint page_no; lock_t* found_lock = NULL; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ ut_ad(lock_get_type(in_lock) == LOCK_REC); space = in_lock->un_member.rec_lock.space; @@ -1456,9 +1398,7 @@ lock_table_has( { lock_t* lock; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ /* Look for stronger locks the same trx already has on the table */ @@ -1502,9 +1442,7 @@ lock_rec_has_expl( { lock_t* lock; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ ut_ad((precise_mode & LOCK_MODE_MASK) == LOCK_S || (precise_mode & LOCK_MODE_MASK) == LOCK_X); ut_ad(!(precise_mode & LOCK_INSERT_INTENTION)); @@ -1552,9 +1490,7 @@ lock_rec_other_has_expl_req( { lock_t* lock; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ ut_ad(mode == LOCK_X || mode == LOCK_S); ut_ad(gap == 0 || gap == LOCK_GAP); ut_ad(wait == 0 || wait == LOCK_WAIT); @@ -1594,9 +1530,8 @@ lock_rec_other_has_conflicting( trx_t* trx) /* in: our transaction */ { lock_t* lock; -#ifdef UNIV_SYNC_DEBUG + ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ lock = lock_rec_get_first(rec); @@ -1629,9 +1564,7 @@ lock_rec_find_similar_on_page( lock_t* lock; ulint heap_no; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ heap_no = rec_get_heap_no(rec, page_rec_is_comp(rec)); lock = lock_rec_get_first_on_page(rec); @@ -1665,9 +1598,7 @@ lock_sec_rec_some_has_impl_off_kernel( { page_t* page; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ ut_ad(!(index->type & DICT_CLUSTERED)); ut_ad(page_rec_is_user_rec(rec)); ut_ad(rec_offs_validate(rec, index, offsets)); @@ -1760,9 +1691,7 @@ lock_rec_create( ulint n_bits; ulint n_bytes; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ page = buf_frame_align(rec); space = buf_frame_get_space_id(page); @@ -1842,9 +1771,7 @@ lock_rec_enqueue_waiting( lock_t* lock; trx_t* trx; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ /* Test if there already is some other reason to suspend thread: we do not enqueue a lock request if the query thread should be @@ -1934,9 +1861,7 @@ lock_rec_add_to_queue( ulint heap_no; ibool somebody_waits = FALSE; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ ut_ad((type_mode & (LOCK_WAIT | LOCK_GAP)) || ((type_mode & LOCK_MODE_MASK) != LOCK_S) || !lock_rec_other_has_expl_req(LOCK_X, 0, LOCK_WAIT, @@ -2017,9 +1942,7 @@ lock_rec_lock_fast( ulint heap_no; trx_t* trx; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ ut_ad((LOCK_MODE_MASK & mode) != LOCK_S || lock_table_has(thr_get_trx(thr), index->table, LOCK_IS)); ut_ad((LOCK_MODE_MASK & mode) != LOCK_X @@ -2102,9 +2025,7 @@ lock_rec_lock_slow( trx_t* trx; ulint err; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ ut_ad((LOCK_MODE_MASK & mode) != LOCK_S || lock_table_has(thr_get_trx(thr), index->table, LOCK_IS)); ut_ad((LOCK_MODE_MASK & mode) != LOCK_X @@ -2176,9 +2097,7 @@ lock_rec_lock( { ulint err; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ ut_ad((LOCK_MODE_MASK & mode) != LOCK_S || lock_table_has(thr_get_trx(thr), index->table, LOCK_IS)); ut_ad((LOCK_MODE_MASK & mode) != 
LOCK_X @@ -2216,9 +2135,7 @@ lock_rec_has_to_wait_in_queue( ulint page_no; ulint heap_no; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ ut_ad(lock_get_wait(wait_lock)); ut_ad(lock_get_type(wait_lock) == LOCK_REC); @@ -2251,9 +2168,7 @@ lock_grant( /*=======*/ lock_t* lock) /* in: waiting lock request */ { -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ lock_reset_lock_and_trx_wait(lock); @@ -2298,9 +2213,7 @@ lock_rec_cancel( /*============*/ lock_t* lock) /* in: waiting record lock request */ { -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ ut_ad(lock_get_type(lock) == LOCK_REC); /* Reset the bit (there can be only one set bit) in the lock bitmap */ @@ -2333,9 +2246,7 @@ lock_rec_dequeue_from_page( lock_t* lock; trx_t* trx; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ ut_ad(lock_get_type(in_lock) == LOCK_REC); trx = in_lock->trx; @@ -2378,9 +2289,7 @@ lock_rec_discard( ulint page_no; trx_t* trx; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ ut_ad(lock_get_type(in_lock) == LOCK_REC); trx = in_lock->trx; @@ -2409,9 +2318,7 @@ lock_rec_free_all_from_discard_page( lock_t* lock; lock_t* next_lock; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ space = buf_frame_get_space_id(page); page_no = buf_frame_get_page_no(page); @@ -2444,9 +2351,7 @@ lock_rec_reset_and_release_wait( lock_t* lock; ulint heap_no; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ heap_no = rec_get_heap_no(rec, page_rec_is_comp(rec)); @@ -2477,9 +2382,8 @@ lock_rec_inherit_to_gap( the locks on this record */ { lock_t* lock; -#ifdef UNIV_SYNC_DEBUG + ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ lock = lock_rec_get_first(rec); @@ -2518,9 +2422,8 @@ lock_rec_inherit_to_gap_if_gap_lock( the locks on this record */ { lock_t* lock; -#ifdef UNIV_SYNC_DEBUG + ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ lock = lock_rec_get_first(rec); @@ -2554,9 +2457,7 @@ lock_rec_move( ulint heap_no; ulint type_mode; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ heap_no = rec_get_heap_no(donator, comp); @@ -3227,10 +3128,9 @@ lock_deadlock_occurs( ulint ret; ulint cost = 0; - ut_ad(trx && lock); -#ifdef UNIV_SYNC_DEBUG + ut_ad(trx); + ut_ad(lock); ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ retry: /* We check that adding this trx to the waits-for graph does not produce a cycle. 
First mark all active transactions @@ -3301,10 +3201,10 @@ lock_deadlock_recursive( trx_t* lock_trx; ulint ret; - ut_a(trx && start && wait_lock); -#ifdef UNIV_SYNC_DEBUG + ut_a(trx); + ut_a(start); + ut_a(wait_lock); ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ if (trx->deadlock_mark == 1) { /* We have already exhaustively searched the subtree starting @@ -3315,12 +3215,6 @@ lock_deadlock_recursive( *cost = *cost + 1; - if ((depth > LOCK_MAX_DEPTH_IN_DEADLOCK_CHECK) - || (*cost > LOCK_MAX_N_STEPS_IN_DEADLOCK_CHECK)) { - - return(LOCK_VICTIM_IS_START); - } - lock = wait_lock; if (lock_get_type(wait_lock) == LOCK_REC) { @@ -3353,11 +3247,18 @@ lock_deadlock_recursive( if (lock_has_to_wait(wait_lock, lock)) { + ibool too_far + = depth > LOCK_MAX_DEPTH_IN_DEADLOCK_CHECK + || *cost > LOCK_MAX_N_STEPS_IN_DEADLOCK_CHECK; + lock_trx = lock->trx; - if (lock_trx == start) { + if (lock_trx == start || too_far) { + /* We came back to the recursion starting - point: a deadlock detected */ + point: a deadlock detected; or we have + searched the waits-for graph too long */ + FILE* ef = lock_latest_err_file; rewind(ef); @@ -3399,11 +3300,22 @@ lock_deadlock_recursive( } #ifdef UNIV_DEBUG if (lock_print_waits) { - fputs("Deadlock detected\n", stderr); + fputs("Deadlock detected" + " or too long search\n", + stderr); } #endif /* UNIV_DEBUG */ - if (ut_dulint_cmp(wait_lock->trx->undo_no, - start->undo_no) >= 0) { + if (too_far) { + + fputs("TOO DEEP OR LONG SEARCH" + " IN THE LOCK TABLE" + " WAITS-FOR GRAPH\n", ef); + + return(LOCK_VICTIM_IS_START); + } + + if (trx_weight_cmp(wait_lock->trx, + start) >= 0) { /* Our recursion starting point transaction is 'smaller', let us choose 'start' as the victim and roll @@ -3472,9 +3384,11 @@ lock_table_create( lock_t* lock; ut_ad(table && trx); -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ + + if ((type_mode & LOCK_MODE_MASK) == LOCK_AUTO_INC) { + ++table->n_waiting_or_granted_auto_inc_locks; + } if (type_mode == LOCK_AUTO_INC) { /* Only one trx can have the lock on the table @@ -3519,15 +3433,16 @@ lock_table_remove_low( dict_table_t* table; trx_t* trx; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ table = lock->un_member.tab_lock.table; trx = lock->trx; if (lock == trx->auto_inc_lock) { trx->auto_inc_lock = NULL; + + ut_a(table->n_waiting_or_granted_auto_inc_locks > 0); + --table->n_waiting_or_granted_auto_inc_locks; } UT_LIST_REMOVE(trx_locks, trx->trx_locks, lock); @@ -3555,9 +3470,7 @@ lock_table_enqueue_waiting( lock_t* lock; trx_t* trx; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ /* Test if there already is some other reason to suspend thread: we do not enqueue a lock request if the query thread should be @@ -3630,9 +3543,7 @@ lock_table_other_has_incompatible( { lock_t* lock; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ lock = UT_LIST_GET_LAST(table->locks); @@ -3786,9 +3697,7 @@ lock_table_dequeue( { lock_t* lock; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ ut_a(lock_get_type(in_lock) == LOCK_TABLE); lock = UT_LIST_GET_NEXT(un_member.tab_lock.locks, in_lock); @@ -3930,9 +3839,7 @@ lock_release_off_kernel( ulint count; lock_t* lock; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ lock = UT_LIST_GET_LAST(trx->trx_locks); @@ -3993,9 +3900,7 @@ lock_cancel_waiting_and_release( 
/*============================*/ lock_t* lock) /* in: waiting lock request */ { -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ if (lock_get_type(lock) == LOCK_REC) { @@ -4028,9 +3933,7 @@ lock_reset_all_on_table_for_trx( lock_t* lock; lock_t* prev_lock; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ lock = UT_LIST_GET_LAST(trx->trx_locks); @@ -4091,9 +3994,7 @@ lock_table_print( FILE* file, /* in: file where to print */ lock_t* lock) /* in: table type lock */ { -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ ut_a(lock_get_type(lock) == LOCK_TABLE); fputs("TABLE LOCK table ", file); @@ -4143,9 +4044,7 @@ lock_rec_print( ulint* offsets = offsets_; *offsets_ = (sizeof offsets_) / sizeof *offsets_; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ ut_a(lock_get_type(lock) == LOCK_REC); space = lock->un_member.rec_lock.space; @@ -4250,9 +4149,7 @@ lock_get_n_rec_locks(void) ulint n_locks = 0; ulint i; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ for (i = 0; i < hash_get_n_cells(lock_sys->rec_hash); i++) { @@ -4490,13 +4387,8 @@ lock_table_queue_validate( dict_table_t* table) /* in: table */ { lock_t* lock; - ibool is_waiting; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ - - is_waiting = FALSE; lock = UT_LIST_GET_FIRST(table->locks); @@ -4507,13 +4399,10 @@ lock_table_queue_validate( if (!lock_get_wait(lock)) { - ut_a(!is_waiting); - ut_a(!lock_table_other_has_incompatible( lock->trx, 0, table, lock_get_mode(lock))); } else { - is_waiting = TRUE; ut_a(lock_table_has_to_wait_in_queue(lock)); } @@ -4665,9 +4554,7 @@ lock_rec_validate_page( ulint* offsets = offsets_; *offsets_ = (sizeof offsets_) / sizeof *offsets_; -#ifdef UNIV_SYNC_DEBUG ut_ad(!mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ mtr_start(&mtr); @@ -4943,9 +4830,7 @@ lock_rec_convert_impl_to_expl( { trx_t* impl_trx; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ ut_ad(page_rec_is_user_rec(rec)); ut_ad(rec_offs_validate(rec, index, offsets)); ut_ad(!page_rec_is_comp(rec) == !rec_offs_comp(offsets)); diff --git a/storage/innobase/log/Makefile.am b/storage/innobase/log/Makefile.am deleted file mode 100644 index a40572a64da..00000000000 --- a/storage/innobase/log/Makefile.am +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (C) 2001, 2003 MySQL AB & Innobase Oy -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -include ../include/Makefile.i - -noinst_LIBRARIES = liblog.a - -liblog_a_SOURCES = log0log.c log0recv.c - -EXTRA_PROGRAMS = - -# Don't update the files from bitkeeper -%::SCCS/s.% diff --git a/storage/innobase/log/log0log.c b/storage/innobase/log/log0log.c index 5d8875f1bd0..b10c348b24d 100644 --- a/storage/innobase/log/log0log.c +++ b/storage/innobase/log/log0log.c @@ -165,9 +165,7 @@ log_buf_pool_get_oldest_modification(void) { dulint lsn; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(log_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ lsn = buf_pool_get_oldest_modification(); @@ -269,9 +267,7 @@ log_write_low( ulint data_len; byte* log_block; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(log->mutex))); -#endif /* UNIV_SYNC_DEBUG */ part_loop: /* Calculate a part length */ @@ -340,9 +336,7 @@ log_close(void) log_t* log = log_sys; ulint checkpoint_age; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(log->mutex))); -#endif /* UNIV_SYNC_DEBUG */ lsn = log->lsn; @@ -464,9 +458,7 @@ log_group_get_capacity( /* out: capacity in bytes */ log_group_t* group) /* in: log group */ { -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(log_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ return((group->file_size - LOG_FILE_HDR_SIZE) * group->n_files); } @@ -482,9 +474,7 @@ log_group_calc_size_offset( ulint offset, /* in: real offset within the log group */ log_group_t* group) /* in: log group */ { -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(log_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ return(offset - LOG_FILE_HDR_SIZE * (1 + offset / group->file_size)); } @@ -500,9 +490,7 @@ log_group_calc_real_offset( ulint offset, /* in: size offset within the log group */ log_group_t* group) /* in: log group */ { -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(log_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ return(offset + LOG_FILE_HDR_SIZE * (1 + offset / (group->file_size - LOG_FILE_HDR_SIZE))); @@ -525,9 +513,7 @@ log_group_calc_lsn_offset( ib_longlong group_size; ib_longlong offset; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(log_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ /* If total log file size is > 2 GB we can easily get overflows with 32-bit integers. Use 64-bit integers instead. 
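A standalone demonstration of the overflow the comment above warns about: computing a log group size in 32-bit arithmetic wraps once n_files * file_size reaches 4 GB, while the 64-bit (ib_longlong) arithmetic used in log_group_calc_lsn_offset() does not. This assumes a platform where unsigned int is 32 bits.

	#include <stdio.h>
	#include <stdint.h>

	int
	main(void)
	{
		uint32_t	file_size = 1024u * 1024 * 1024; /* 1 GB */
		uint32_t	n_files = 4;

		uint32_t	size32 = n_files * file_size;	/* wraps to 0 */
		uint64_t	size64 = (uint64_t) n_files
			* file_size;				/* 4 GB */

		printf("32-bit group size: %u\n", size32);
		printf("64-bit group size: %llu\n",
		       (unsigned long long) size64);
		return(0);
	}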
*/ @@ -642,9 +628,7 @@ log_calc_max_ages(void) ulint archive_margin; ulint smallest_archive_margin; -#ifdef UNIV_SYNC_DEBUG ut_ad(!mutex_own(&(log_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ mutex_enter(&(log_sys->mutex)); @@ -942,9 +926,7 @@ log_flush_do_unlocks( ulint code) /* in: any ORed combination of LOG_UNLOCK_FLUSH_LOCK and LOG_UNLOCK_NONE_FLUSHED_LOCK */ { -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(log_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ /* NOTE that we must own the log mutex when doing the setting of the events: this is because transactions will wait for these events to @@ -976,9 +958,7 @@ log_group_check_flush_completion( /* out: LOG_UNLOCK_NONE_FLUSHED_LOCK or 0 */ log_group_t* group) /* in: log group */ { -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(log_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ if (!log_sys->one_flushed && group->n_pending_writes == 0) { #ifdef UNIV_DEBUG @@ -1015,9 +995,7 @@ log_sys_check_flush_completion(void) ulint move_start; ulint move_end; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(log_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ if (log_sys->n_pending_writes == 0) { @@ -1129,10 +1107,8 @@ log_group_file_header_flush( { byte* buf; ulint dest_offset; -#ifdef UNIV_SYNC_DEBUG - ut_ad(mutex_own(&(log_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ + ut_ad(mutex_own(&(log_sys->mutex))); ut_a(nth_file < group->n_files); buf = *(group->file_header_bufs + nth_file); @@ -1203,9 +1179,7 @@ log_group_write_buf( ulint next_offset; ulint i; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(log_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ ut_a(len % OS_FILE_LOG_BLOCK_SIZE == 0); ut_a(ut_dulint_get_low(start_lsn) % OS_FILE_LOG_BLOCK_SIZE == 0); @@ -1626,9 +1600,7 @@ void log_complete_checkpoint(void) /*=========================*/ { -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(log_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ ut_ad(log_sys->n_pending_checkpoint_writes == 0); log_sys->next_checkpoint_no @@ -1715,9 +1687,7 @@ log_group_checkpoint( byte* buf; ulint i; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(log_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ #if LOG_CHECKPOINT_SIZE > OS_FILE_LOG_BLOCK_SIZE # error "LOG_CHECKPOINT_SIZE > OS_FILE_LOG_BLOCK_SIZE" #endif @@ -1882,9 +1852,7 @@ log_group_read_checkpoint_info( log_group_t* group, /* in: log group */ ulint field) /* in: LOG_CHECKPOINT_1 or LOG_CHECKPOINT_2 */ { -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(log_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ log_sys->n_log_ios++; @@ -1902,9 +1870,7 @@ log_groups_write_checkpoint_info(void) { log_group_t* group; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(log_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ group = UT_LIST_GET_FIRST(log_sys->log_groups); @@ -2162,9 +2128,7 @@ log_group_read_log_seg( ulint source_offset; ibool sync; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(log_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ sync = FALSE; @@ -2237,9 +2201,7 @@ log_group_archive_file_header_write( byte* buf; ulint dest_offset; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(log_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ ut_a(nth_file < group->n_files); @@ -2276,9 +2238,7 @@ log_group_archive_completed_header_write( byte* buf; ulint dest_offset; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(log_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ ut_a(nth_file < group->n_files); buf = *(group->archive_file_header_bufs + nth_file); @@ -2317,9 +2277,7 @@ log_group_archive( ulint n_files; ulint open_mode; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(log_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ start_lsn = 
log_sys->archived_lsn; @@ -2452,9 +2410,7 @@ log_archive_groups(void) { log_group_t* group; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(log_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ group = UT_LIST_GET_FIRST(log_sys->log_groups); @@ -2477,9 +2433,7 @@ log_archive_write_complete_groups(void) dulint end_lsn; ulint i; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(log_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ group = UT_LIST_GET_FIRST(log_sys->log_groups); @@ -2546,9 +2500,7 @@ void log_archive_check_completion_low(void) /*==================================*/ { -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(log_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ if (log_sys->n_pending_archive_ios == 0 && log_sys->archiving_phase == LOG_ARCHIVE_READ) { @@ -2784,9 +2736,7 @@ log_archive_close_groups( log_group_t* group; ulint trunc_len; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(log_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ if (log_sys->archiving_state == LOG_ARCH_OFF) { @@ -3089,10 +3039,22 @@ loop: mutex_enter(&kernel_mutex); - /* Check that there are no longer transactions. We need this wait - even for the 'very fast' shutdown, because the InnoDB layer may have - committed or prepared transactions and we don't want to lose - them. */ + /* We need the monitor threads to stop before we proceed with a + normal shutdown. In case of very fast shutdown, however, we can + proceed without waiting for monitor threads. */ + + if (srv_fast_shutdown < 2 + && (srv_error_monitor_active + || srv_lock_timeout_and_monitor_active)) { + + mutex_exit(&kernel_mutex); + + goto loop; + } + + /* Check that there are no longer transactions. We need this wait even + for the 'very fast' shutdown, because the InnoDB layer may have + committed or prepared transactions and we don't want to lose them. */ if (trx_n_mysql_transactions > 0 || UT_LIST_GET_LEN(trx_sys->trx_list) > 0) { @@ -3213,22 +3175,8 @@ loop: goto loop; } - /* The lock timeout thread should now have exited */ - - if (srv_lock_timeout_and_monitor_active) { - - goto loop; - } - - /* We now let also the InnoDB error monitor thread to exit */ - srv_shutdown_state = SRV_SHUTDOWN_LAST_PHASE; - if (srv_error_monitor_active) { - - goto loop; - } - /* Make some checks that the server really is quiet */ ut_a(srv_n_threads_active[SRV_MASTER] == 0); ut_a(buf_all_freed()); @@ -3278,9 +3226,7 @@ log_check_log_recs( byte* buf1; byte* scan_buf; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(log_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ if (len == 0) { @@ -3322,7 +3268,7 @@ log_peek_lsn( log system mutex */ dulint* lsn) /* out: if returns TRUE, current lsn is here */ { - if (0 == mutex_enter_nowait(&(log_sys->mutex), __FILE__, __LINE__)) { + if (0 == mutex_enter_nowait(&(log_sys->mutex))) { *lsn = log_sys->lsn; mutex_exit(&(log_sys->mutex)); diff --git a/storage/innobase/log/log0recv.c b/storage/innobase/log/log0recv.c index 41e2b921664..aef58b7b576 100644 --- a/storage/innobase/log/log0recv.c +++ b/storage/innobase/log/log0recv.c @@ -57,6 +57,16 @@ ibool recv_needed_recovery = FALSE; ibool recv_lsn_checks_on = FALSE; +/* There are two conditions under which we scan the logs, the first +is normal startup and the second is when we do a recovery from an +archive. +This flag is set if we are doing a scan from the last checkpoint during +startup. If we find log entries that were written after the last checkpoint +we know that the server was not cleanly shutdown. We must then initialize +the crash recovery environment before attempting to store these entries in +the log hash table. 
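Condensed sketch, not a verbatim excerpt, of the check this flag enables inside recv_scan_log_recs() (shown in the hunk below): when a startup-type scan finds log written past the checkpoint, crash recovery is initialized lazily, before those records are buffered in the hash table. It assumes the declarations above; the function name is hypothetical.

	static void
	note_scan_progress_sketch(dulint scanned_lsn)
	{
		if (ut_dulint_cmp(scanned_lsn, recv_sys->scanned_lsn) > 0
		    && recv_log_scan_is_startup_type
		    && !recv_needed_recovery) {

			/* the log continues past the checkpoint, so the
			shutdown was not clean: load .ibd tablespace
			info and restore doublewrite pages before
			parsing further */
			recv_init_crash_recovery();
		}
	}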
*/ +ibool recv_log_scan_is_startup_type = FALSE; + /* If the following is TRUE, the buffer pool file pages must be invalidated after recovery and no ibuf operations are allowed; this becomes TRUE if the log record hash table becomes too full, and log records must be merged @@ -99,6 +109,16 @@ the recovery failed and the database may be corrupt. */ dulint recv_max_page_lsn; +/* prototypes */ + +/*********************************************************** +Initialize crash recovery environment. Can be called iff +recv_needed_recovery == FALSE. */ +static +void +recv_init_crash_recovery(void); +/*===========================*/ + /************************************************************ Creates the recovery system. */ @@ -171,9 +191,8 @@ void recv_sys_empty_hash(void) /*=====================*/ { -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(recv_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ + if (recv_sys->n_addrs != 0) { fprintf(stderr, "InnoDB: Error: %lu pages with log records" @@ -1396,9 +1415,8 @@ loop: goto loop; } -#ifdef UNIV_SYNC_DEBUG ut_ad(!allow_ibuf == mutex_own(&log_sys->mutex)); -#endif /* UNIV_SYNC_DEBUG */ + if (!allow_ibuf) { recv_no_ibuf_operations = TRUE; } @@ -1842,9 +1860,7 @@ recv_parse_log_recs( byte* body; ulint n_recs; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(log_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ ut_ad(!ut_dulint_is_zero(recv_sys->parse_start_lsn)); loop: ptr = recv_sys->buf + recv_sys->recovered_offset; @@ -2288,6 +2304,23 @@ recv_scan_log_recs( if (ut_dulint_cmp(scanned_lsn, recv_sys->scanned_lsn) > 0) { + /* We have found more entries. If this scan is + of startup type, we must initiate crash recovery + environment before parsing these log records. */ + + if (recv_log_scan_is_startup_type + && !recv_needed_recovery) { + + fprintf(stderr, + "InnoDB: Log scan progressed" + " past the checkpoint lsn %lu %lu\n", + (ulong) ut_dulint_get_high( + recv_sys->scanned_lsn), + (ulong) ut_dulint_get_low( + recv_sys->scanned_lsn)); + recv_init_crash_recovery(); + } + /* We were able to find more log data: add it to the parsing buffer if parse_start_lsn is already non-zero */ @@ -2409,6 +2442,47 @@ recv_group_scan_log_recs( #endif /* UNIV_DEBUG */ } +/*********************************************************** +Initialize crash recovery environment. Can be called iff +recv_needed_recovery == FALSE. */ +static +void +recv_init_crash_recovery(void) +/*==========================*/ +{ + ut_a(!recv_needed_recovery); + + recv_needed_recovery = TRUE; + + ut_print_timestamp(stderr); + + fprintf(stderr, + " InnoDB: Database was not" + " shut down normally!\n" + "InnoDB: Starting crash recovery.\n"); + + fprintf(stderr, + "InnoDB: Reading tablespace information" + " from the .ibd files...\n"); + + fil_load_single_table_tablespaces(); + + /* If we are using the doublewrite method, we will + check if there are half-written pages in data files, + and restore them from the doublewrite buffer if + possible */ + + if (srv_force_recovery < SRV_FORCE_NO_LOG_REDO) { + + fprintf(stderr, + "InnoDB: Restoring possible" + " half-written data pages from" + " the doublewrite\n" + "InnoDB: buffer...\n"); + trx_sys_doublewrite_init_or_restore_pages(TRUE); + } +} + /************************************************************ Recovers from a checkpoint. 
When this function returns, the database is able to start processing of new user transactions, but the function @@ -2536,92 +2610,6 @@ recv_recovery_from_checkpoint_start( recv_sys->recovered_lsn = checkpoint_lsn; srv_start_lsn = checkpoint_lsn; - - /* NOTE: we always do a 'recovery' at startup, but only if - there is something wrong we will print a message to the - user about recovery: */ - - if (ut_dulint_cmp(checkpoint_lsn, max_flushed_lsn) != 0 - || ut_dulint_cmp(checkpoint_lsn, min_flushed_lsn) != 0) { - - if (ut_dulint_cmp(checkpoint_lsn, max_flushed_lsn) - < 0) { - fprintf(stderr, - "InnoDB: #########################" - "#################################\n" - "InnoDB: " - "WARNING!\n" - "InnoDB: The log sequence number" - " in ibdata files is higher\n" - "InnoDB: than the log sequence number" - " in the ib_logfiles! Are you sure\n" - "InnoDB: you are using the right" - " ib_logfiles to start up" - " the database?\n" - "InnoDB: Log sequence number in" - " ib_logfiles is %lu %lu, log\n" - "InnoDB: sequence numbers stamped" - " to ibdata file headers are between\n" - "InnoDB: %lu %lu and %lu %lu.\n" - "InnoDB: #########################" - "#################################\n", - (ulong) ut_dulint_get_high( - checkpoint_lsn), - (ulong) ut_dulint_get_low( - checkpoint_lsn), - (ulong) ut_dulint_get_high( - min_flushed_lsn), - (ulong) ut_dulint_get_low( - min_flushed_lsn), - (ulong) ut_dulint_get_high( - max_flushed_lsn), - (ulong) ut_dulint_get_low( - max_flushed_lsn)); - } - - recv_needed_recovery = TRUE; - - ut_print_timestamp(stderr); - - fprintf(stderr, - " InnoDB: Database was not" - " shut down normally!\n" - "InnoDB: Starting crash recovery.\n"); - - fprintf(stderr, - "InnoDB: Reading tablespace information" - " from the .ibd files...\n"); - - fil_load_single_table_tablespaces(); - - /* If we are using the doublewrite method, we will - check if there are half-written pages in data files, - and restore them from the doublewrite buffer if - possible */ - - if (srv_force_recovery < SRV_FORCE_NO_LOG_REDO) { - - fprintf(stderr, - "InnoDB: Restoring possible" - " half-written data pages from" - " the doublewrite\n" - "InnoDB: buffer...\n"); - trx_sys_doublewrite_init_or_restore_pages( - TRUE); - } - - ut_print_timestamp(stderr); - - fprintf(stderr, - " InnoDB: Starting log scan" - " based on checkpoint at\n" - "InnoDB: log sequence number %lu %lu.\n", - (ulong) ut_dulint_get_high(checkpoint_lsn), - (ulong) ut_dulint_get_low(checkpoint_lsn)); - } else { - /* Init the doublewrite buffer memory structure */ - trx_sys_doublewrite_init_or_restore_pages(FALSE); - } } contiguous_lsn = ut_dulint_align_down(recv_sys->scanned_lsn, @@ -2674,6 +2662,8 @@ recv_recovery_from_checkpoint_start( group = UT_LIST_GET_NEXT(log_groups, group); } + /* Set the flag to publish that we are doing startup scan. */ + recv_log_scan_is_startup_type = (type == LOG_CHECKPOINT); while (group) { old_scanned_lsn = recv_sys->scanned_lsn; @@ -2695,6 +2685,69 @@ recv_recovery_from_checkpoint_start( group = UT_LIST_GET_NEXT(log_groups, group); } + /* Done with startup scan. Clear the flag. 
*/ + recv_log_scan_is_startup_type = FALSE; + if (type == LOG_CHECKPOINT) { + /* NOTE: we always do a 'recovery' at startup, but only if + there is something wrong we will print a message to the + user about recovery: */ + + if (ut_dulint_cmp(checkpoint_lsn, max_flushed_lsn) != 0 + || ut_dulint_cmp(checkpoint_lsn, min_flushed_lsn) != 0) { + + if (ut_dulint_cmp(checkpoint_lsn, max_flushed_lsn) + < 0) { + fprintf(stderr, + "InnoDB: #########################" + "#################################\n" + "InnoDB: " + "WARNING!\n" + "InnoDB: The log sequence number" + " in ibdata files is higher\n" + "InnoDB: than the log sequence number" + " in the ib_logfiles! Are you sure\n" + "InnoDB: you are using the right" + " ib_logfiles to start up" + " the database?\n" + "InnoDB: Log sequence number in" + " ib_logfiles is %lu %lu, log\n" + "InnoDB: sequence numbers stamped" + " to ibdata file headers are between\n" + "InnoDB: %lu %lu and %lu %lu.\n" + "InnoDB: #########################" + "#################################\n", + (ulong) ut_dulint_get_high( + checkpoint_lsn), + (ulong) ut_dulint_get_low( + checkpoint_lsn), + (ulong) ut_dulint_get_high( + min_flushed_lsn), + (ulong) ut_dulint_get_low( + min_flushed_lsn), + (ulong) ut_dulint_get_high( + max_flushed_lsn), + (ulong) ut_dulint_get_low( + max_flushed_lsn)); + + + } + + if (!recv_needed_recovery) { + fprintf(stderr, + "InnoDB: The log sequence number" + " in ibdata files does not match\n" + "InnoDB: the log sequence number" + " in the ib_logfiles!\n"); + recv_init_crash_recovery(); + } + + } + if (!recv_needed_recovery) { + /* Init the doublewrite buffer memory structure */ + trx_sys_doublewrite_init_or_restore_pages(FALSE); + } + } + /* We currently have only one log group */ if (ut_dulint_cmp(group_scanned_lsn, checkpoint_lsn) < 0) { ut_print_timestamp(stderr); @@ -2751,20 +2804,9 @@ recv_recovery_from_checkpoint_start( recv_synchronize_groups(up_to_date_group); if (!recv_needed_recovery) { - if (ut_dulint_cmp(checkpoint_lsn, recv_sys->recovered_lsn) - != 0) { - fprintf(stderr, - "InnoDB: Warning: we did not need to do" - " crash recovery, but log scan\n" - "InnoDB: progressed past the checkpoint" - " lsn %lu %lu up to lsn %lu %lu\n", - (ulong) ut_dulint_get_high(checkpoint_lsn), - (ulong) ut_dulint_get_low(checkpoint_lsn), - (ulong) ut_dulint_get_high( - recv_sys->recovered_lsn), - (ulong) ut_dulint_get_low( - recv_sys->recovered_lsn)); - } + ut_a(ut_dulint_cmp(checkpoint_lsn, + recv_sys->recovered_lsn) == 0); + } else { srv_start_lsn = recv_sys->recovered_lsn; } @@ -2894,9 +2936,8 @@ recv_reset_logs( { log_group_t* group; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(log_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ + log_sys->lsn = ut_dulint_align_up(lsn, OS_FILE_LOG_BLOCK_SIZE); group = UT_LIST_GET_FIRST(log_sys->log_groups); diff --git a/storage/innobase/mach/Makefile.am b/storage/innobase/mach/Makefile.am deleted file mode 100644 index 1a59cb3e4d7..00000000000 --- a/storage/innobase/mach/Makefile.am +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (C) 2001, 2003 MySQL AB & Innobase Oy -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -include ../include/Makefile.i - -noinst_LIBRARIES = libmach.a - -libmach_a_SOURCES = mach0data.c - -EXTRA_PROGRAMS = - -# Don't update the files from bitkeeper -%::SCCS/s.% diff --git a/storage/innobase/mem/Makefile.am b/storage/innobase/mem/Makefile.am deleted file mode 100644 index 598dbb96124..00000000000 --- a/storage/innobase/mem/Makefile.am +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (C) 2001, 2003 MySQL AB & Innobase Oy -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -include ../include/Makefile.i - -noinst_LIBRARIES = libmem.a - -libmem_a_SOURCES = mem0mem.c mem0pool.c - -EXTRA_DIST = mem0dbg.c - -EXTRA_PROGRAMS = - -# Don't update the files from bitkeeper -%::SCCS/s.% diff --git a/storage/innobase/mem/mem0mem.c b/storage/innobase/mem/mem0mem.c index 10b359e8e67..d89a3a55d88 100644 --- a/storage/innobase/mem/mem0mem.c +++ b/storage/innobase/mem/mem0mem.c @@ -514,6 +514,7 @@ mem_heap_block_free( mem_erase_buf((byte*)block, len); #endif + UNIV_MEM_FREE(block, len); if (init_block) { /* Do not have to free: do nothing */ diff --git a/storage/innobase/mem/mem0pool.c b/storage/innobase/mem/mem0pool.c index a7acd331e16..27da86a0309 100644 --- a/storage/innobase/mem/mem0pool.c +++ b/storage/innobase/mem/mem0pool.c @@ -229,6 +229,8 @@ mem_pool_create( mem_area_set_size(area, ut_2_exp(i)); mem_area_set_free(area, TRUE); + UNIV_MEM_FREE(MEM_AREA_EXTRA_SIZE + (byte*) area, + ut_2_exp(i) - MEM_AREA_EXTRA_SIZE); UT_LIST_ADD_FIRST(free_list, pool->free_list[i], area); @@ -257,9 +259,7 @@ mem_pool_fill_free_list( mem_area_t* area2; ibool ret; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(pool->mutex))); -#endif /* UNIV_SYNC_DEBUG */ if (i >= 63) { /* We come here when we have run out of space in the @@ -302,6 +302,7 @@ mem_pool_fill_free_list( UT_LIST_REMOVE(free_list, pool->free_list[i + 1], area); area2 = (mem_area_t*)(((byte*)area) + ut_2_exp(i)); + UNIV_MEM_ALLOC(area2, MEM_AREA_EXTRA_SIZE); mem_area_set_size(area2, ut_2_exp(i)); mem_area_set_free(area2, TRUE); @@ -402,6 +403,8 @@ mem_area_alloc( mutex_exit(&(pool->mutex)); ut_ad(mem_pool_validate(pool)); + UNIV_MEM_ALLOC(MEM_AREA_EXTRA_SIZE + (byte*)area, + ut_2_exp(n) - MEM_AREA_EXTRA_SIZE); return((void*)(MEM_AREA_EXTRA_SIZE + ((byte*)area))); } @@ -484,6 +487,7 @@ mem_area_free( } size = mem_area_get_size(area); + UNIV_MEM_FREE(ptr, size - MEM_AREA_EXTRA_SIZE); if (size == 0) { fprintf(stderr, diff --git a/storage/innobase/mtr/Makefile.am b/storage/innobase/mtr/Makefile.am deleted file mode 100644 index 80eb7c907be..00000000000 --- a/storage/innobase/mtr/Makefile.am +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (C) 2001, 2003 MySQL AB & Innobase Oy -# -# This program is free software; you can redistribute it and/or modify -# it 
under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -include ../include/Makefile.i - -noinst_LIBRARIES = libmtr.a - -libmtr_a_SOURCES = mtr0mtr.c mtr0log.c - -EXTRA_PROGRAMS = - -# Don't update the files from bitkeeper -%::SCCS/s.% diff --git a/storage/innobase/mtr/mtr0log.c b/storage/innobase/mtr/mtr0log.c index cb03f207a56..e5d572bbfa7 100644 --- a/storage/innobase/mtr/mtr0log.c +++ b/storage/innobase/mtr/mtr0log.c @@ -517,8 +517,9 @@ mlog_parse_index( n = mach_read_from_2(ptr); ptr += 2; n_uniq = mach_read_from_2(ptr); + ptr += 2; ut_ad(n_uniq <= n); - if (end_ptr < ptr + (n + 1) * 2) { + if (end_ptr < ptr + n * 2) { return(NULL); } } else { @@ -529,20 +530,20 @@ mlog_parse_index( ind = dict_mem_index_create("LOG_DUMMY", "LOG_DUMMY", DICT_HDR_SPACE, 0, n); ind->table = table; - ind->n_uniq = n_uniq; + ind->n_uniq = (unsigned int) n_uniq; if (n_uniq != n) { + ut_a(n_uniq + DATA_ROLL_PTR <= n); ind->type = DICT_CLUSTERED; } - /* avoid ut_ad(index->cached) in dict_index_get_n_unique_in_tree */ - ind->cached = TRUE; if (comp) { for (i = 0; i < n; i++) { - ulint len = mach_read_from_2(ptr += 2); + ulint len = mach_read_from_2(ptr); + ptr += 2; /* The high-order bit of len is the NOT NULL flag; the rest is 0 or 0x7fff for variable-length fields, and 1..0x7ffe for fixed-length fields. */ dict_mem_table_add_col( - table, "DUMMY", + table, NULL, NULL, ((len + 1) & 0x7fff) <= 1 ? DATA_BINARY : DATA_FIXBINARY, len & 0x8000 ? DATA_NOT_NULL : 0, @@ -552,8 +553,23 @@ mlog_parse_index( dict_table_get_nth_col(table, i), 0); } - ptr += 2; + dict_table_add_system_columns(table, table->heap); + if (n_uniq != n) { + /* Identify DB_TRX_ID and DB_ROLL_PTR in the index. */ + ut_a(DATA_TRX_ID_LEN + == dict_index_get_nth_col(ind, DATA_TRX_ID - 1 + + n_uniq)->len); + ut_a(DATA_ROLL_PTR_LEN + == dict_index_get_nth_col(ind, DATA_ROLL_PTR - 1 + + n_uniq)->len); + ind->fields[DATA_TRX_ID - 1 + n_uniq].col + = &table->cols[n + DATA_TRX_ID]; + ind->fields[DATA_ROLL_PTR - 1 + n_uniq].col + = &table->cols[n + DATA_ROLL_PTR]; + } } + /* avoid ut_ad(index->cached) in dict_index_get_n_unique_in_tree */ + ind->cached = TRUE; *index = ind; return(ptr); } diff --git a/storage/innobase/os/Makefile.am b/storage/innobase/os/Makefile.am deleted file mode 100644 index d5c45eba54e..00000000000 --- a/storage/innobase/os/Makefile.am +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (C) 2001, 2003-2004 MySQL AB & Innobase Oy -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -include ../include/Makefile.i - -noinst_LIBRARIES = libos.a - -libos_a_SOURCES = os0proc.c os0sync.c os0thread.c os0file.c - -EXTRA_PROGRAMS = - -# Don't update the files from bitkeeper -%::SCCS/s.% diff --git a/storage/innobase/os/os0file.c b/storage/innobase/os/os0file.c index c4d051ec771..78140cc5ecf 100644 --- a/storage/innobase/os/os0file.c +++ b/storage/innobase/os/os0file.c @@ -250,6 +250,15 @@ os_file_get_last_error( "InnoDB: the directory. It may also be" " you have created a subdirectory\n" "InnoDB: of the same name as a data file.\n"); + } else if (err == ERROR_SHARING_VIOLATION + || err == ERROR_LOCK_VIOLATION) { + fprintf(stderr, + "InnoDB: The error means that another program" + " is using InnoDB's files.\n" + "InnoDB: This might be a backup or antivirus" + " software or another instance\n" + "InnoDB: of MySQL." + " Please close it to get rid of this error.\n"); } else { fprintf(stderr, "InnoDB: Some operating system error numbers" @@ -268,6 +277,9 @@ os_file_get_last_error( return(OS_FILE_DISK_FULL); } else if (err == ERROR_FILE_EXISTS) { return(OS_FILE_ALREADY_EXISTS); + } else if (err == ERROR_SHARING_VIOLATION + || err == ERROR_LOCK_VIOLATION) { + return(OS_FILE_SHARING_VIOLATION); } else { return(100 + err); } @@ -318,7 +330,7 @@ os_file_get_last_error( fflush(stderr); - if (err == ENOSPC ) { + if (err == ENOSPC) { return(OS_FILE_DISK_FULL); #ifdef POSIX_ASYNC_IO } else if (err == EAGAIN) { @@ -337,15 +349,20 @@ os_file_get_last_error( } /******************************************************************** -Does error handling when a file operation fails. */ +Does error handling when a file operation fails. +Conditionally exits (calling exit(3)) based on should_exit value and the +error type */ + static ibool -os_file_handle_error( -/*=================*/ - /* out: TRUE if we should retry the - operation */ - const char* name, /* in: name of a file or NULL */ - const char* operation)/* in: operation */ +os_file_handle_error_cond_exit( +/*===========================*/ + /* out: TRUE if we should retry the + operation */ + const char* name, /* in: name of a file or NULL */ + const char* operation, /* in: operation */ + ibool should_exit) /* in: call exit(3) if unknown error + and this parameter is TRUE */ { ulint err; @@ -376,15 +393,17 @@ os_file_handle_error( fflush(stderr); return(FALSE); - } else if (err == OS_FILE_AIO_RESOURCES_RESERVED) { return(TRUE); - } else if (err == OS_FILE_ALREADY_EXISTS || err == OS_FILE_PATH_ERROR) { return(FALSE); + } else if (err == OS_FILE_SHARING_VIOLATION) { + + os_thread_sleep(10000000); /* 10 sec */ + return(TRUE); } else { if (name) { fprintf(stderr, "InnoDB: File name %s\n", name); @@ -392,22 +411,54 @@ os_file_handle_error( fprintf(stderr, "InnoDB: File operation call: '%s'.\n", operation); - fprintf(stderr, "InnoDB: Cannot continue operation.\n"); - fflush(stderr); + if (should_exit) { + fprintf(stderr, "InnoDB: Cannot continue operation.\n"); - exit(1); + fflush(stderr); + + exit(1); + } } return(FALSE); } +/******************************************************************** +Does error handling when a file operation fails. 
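A caller-side sketch of what the exit/no-exit split and the new OS_FILE_SHARING_VIOLATION mapping provide, written as if inside os0file.c where os_file_handle_error(), defined just below, is visible. The handler returns TRUE for transient errors, including a sharing violation after the 10 second sleep above, and the caller simply retries; POSIX open(2) is used here purely for brevity, and the wrapper name is hypothetical.

	#include <fcntl.h>

	static int
	open_with_retry_sketch(const char* name)
	{
		int	fd;
	try_again:
		fd = open(name, O_RDWR);

		if (fd == -1 && os_file_handle_error(name, "open")) {
			/* transient, e.g. another program briefly
			holds the file: try again */
			goto try_again;
		}

		/* on a hard failure the handler has already printed
		diagnostics and, for unknown errors, called exit(1) */
		return(fd);
	}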
*/ +static +ibool +os_file_handle_error( +/*=================*/ + /* out: TRUE if we should retry the + operation */ + const char* name, /* in: name of a file or NULL */ + const char* operation)/* in: operation */ +{ + /* exit in case of unknown error */ + return(os_file_handle_error_cond_exit(name, operation, TRUE)); +} + +/******************************************************************** +Does error handling when a file operation fails. */ +static +ibool +os_file_handle_error_no_exit( +/*=========================*/ + /* out: TRUE if we should retry the + operation */ + const char* name, /* in: name of a file or NULL */ + const char* operation)/* in: operation */ +{ + /* don't exit in case of unknown error */ + return(os_file_handle_error_cond_exit(name, operation, FALSE)); +} + #undef USE_FILE_LOCK #define USE_FILE_LOCK -#if defined(UNIV_HOTBACKUP) || defined(__WIN__) || defined(__FreeBSD__) || defined(__NETWARE__) +#if defined(UNIV_HOTBACKUP) || defined(__WIN__) || defined(__NETWARE__) /* InnoDB Hot Backup does not lock the data files. * On Windows, mandatory locking is used. - * On FreeBSD with LinuxThreads, advisory locking does not work properly. */ # undef USE_FILE_LOCK #endif @@ -446,68 +497,6 @@ os_file_lock( #endif /* USE_FILE_LOCK */ /******************************************************************** -Does error handling when a file operation fails. */ -static -ibool -os_file_handle_error_no_exit( -/*=========================*/ - /* out: TRUE if we should retry the - operation */ - const char* name, /* in: name of a file or NULL */ - const char* operation)/* in: operation */ -{ - ulint err; - - err = os_file_get_last_error(FALSE); - - if (err == OS_FILE_DISK_FULL) { - /* We only print a warning about disk full once */ - - if (os_has_said_disk_full) { - - return(FALSE); - } - - if (name) { - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Encountered a problem with" - " file %s\n", name); - } - - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Disk is full. Try to clean the disk" - " to free space.\n"); - - os_has_said_disk_full = TRUE; - - fflush(stderr); - - return(FALSE); - - } else if (err == OS_FILE_AIO_RESOURCES_RESERVED) { - - return(TRUE); - - } else if (err == OS_FILE_ALREADY_EXISTS - || err == OS_FILE_PATH_ERROR) { - - return(FALSE); - } else { - if (name) { - fprintf(stderr, "InnoDB: File name %s\n", name); - } - - fprintf(stderr, "InnoDB: File operation call: '%s'.\n", - operation); - return (FALSE); - } - - return(FALSE); /* not reached */ -} - -/******************************************************************** Creates the seek mutexes used in positioned reads and writes. */ void @@ -930,7 +919,7 @@ try_again: file = CreateFile((LPCTSTR) name, access, FILE_SHARE_READ | FILE_SHARE_WRITE, - /* file can be read ansd written also + /* file can be read and written also by other processes */ NULL, /* default security attributes */ create_flag, @@ -1125,6 +1114,51 @@ os_file_create_simple_no_error_handling( } /******************************************************************** +Tries to disable OS caching on an opened file descriptor. 
*/ + +void +os_file_set_nocache( +/*================*/ + int fd, /* in: file descriptor to alter */ + const char* file_name, /* in: used in the diagnostic message */ + const char* operation_name) /* in: used in the diagnostic message, + we call os_file_set_nocache() + immediately after opening or creating + a file, so this is either "open" or + "create" */ +{ + /* some versions of Solaris may not have DIRECTIO_ON */ +#if defined(UNIV_SOLARIS) && defined(DIRECTIO_ON) + if (directio(fd, DIRECTIO_ON) == -1) { + int errno_save; + errno_save = (int)errno; + ut_print_timestamp(stderr); + fprintf(stderr, + " InnoDB: Failed to set DIRECTIO_ON " + "on file %s: %s: %s, continuing anyway\n", + file_name, operation_name, strerror(errno_save)); + } +#elif defined(O_DIRECT) + if (fcntl(fd, F_SETFL, O_DIRECT) == -1) { + int errno_save; + errno_save = (int)errno; + ut_print_timestamp(stderr); + fprintf(stderr, + " InnoDB: Failed to set O_DIRECT " + "on file %s: %s: %s, continuing anyway\n", + file_name, operation_name, strerror(errno_save)); + if (errno_save == EINVAL) { + ut_print_timestamp(stderr); + fprintf(stderr, + " InnoDB: O_DIRECT is known to result in " + "'Invalid argument' on Linux on tmpfs, " + "see MySQL Bug#26662\n"); + } + } +#endif +} + +/******************************************************************** Opens an existing file or creates a new. */ os_file_t @@ -1306,21 +1340,8 @@ try_again: create_flag = create_flag | O_SYNC; } #endif /* O_SYNC */ -#ifdef O_DIRECT - /* We let O_DIRECT only affect data files */ - if (type != OS_LOG_FILE - && srv_unix_file_flush_method == SRV_UNIX_O_DIRECT) { -# if 0 - fprintf(stderr, "Using O_DIRECT for file %s\n", name); -# endif - create_flag = create_flag | O_DIRECT; - } -#endif /* O_DIRECT */ - if (create_mode == OS_FILE_CREATE) { - file = open(name, create_flag, os_innodb_umask); - } else { - file = open(name, create_flag); - } + + file = open(name, create_flag, os_innodb_umask); if (file == -1) { *success = FALSE; @@ -1330,11 +1351,24 @@ try_again: "create" : "open"); if (retry) { goto try_again; + } else { + return(file /* -1 */); } + } + /* else */ + + *success = TRUE; + + /* We disable OS caching (O_DIRECT) only on data files */ + if (type != OS_LOG_FILE + && srv_unix_file_flush_method == SRV_UNIX_O_DIRECT) { + + os_file_set_nocache(file, name, mode_str); + } + #ifdef USE_FILE_LOCK - } else if (create_mode != OS_FILE_OPEN_RAW - && os_file_lock(file, name)) { - *success = FALSE; + if (create_mode != OS_FILE_OPEN_RAW && os_file_lock(file, name)) { + if (create_mode == OS_FILE_OPEN_RETRY) { int i; ut_print_timestamp(stderr); @@ -1352,12 +1386,12 @@ try_again: fputs(" InnoDB: Unable to open the first data file\n", stderr); } + + *success = FALSE; close(file); file = -1; -#endif - } else { - *success = TRUE; } +#endif /* USE_FILE_LOCK */ return(file); #endif /* __WIN__ */ @@ -1509,7 +1543,7 @@ os_file_rename( return(TRUE); } - os_file_handle_error(oldpath, "rename"); + os_file_handle_error_no_exit(oldpath, "rename"); return(FALSE); #else @@ -1518,7 +1552,7 @@ os_file_rename( ret = rename((const char*)oldpath, (const char*)newpath); if (ret != 0) { - os_file_handle_error(oldpath, "rename"); + os_file_handle_error_no_exit(oldpath, "rename"); return(FALSE); } diff --git a/storage/innobase/page/Makefile.am b/storage/innobase/page/Makefile.am deleted file mode 100644 index 1a5b202a2c9..00000000000 --- a/storage/innobase/page/Makefile.am +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (C) 2001, 2003 MySQL AB & Innobase Oy -# -# This program is free software; 
you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -include ../include/Makefile.i - -noinst_LIBRARIES = libpage.a - -libpage_a_SOURCES = page0page.c page0cur.c - -EXTRA_PROGRAMS = - -# Don't update the files from bitkeeper -%::SCCS/s.% diff --git a/storage/innobase/page/page0page.c b/storage/innobase/page/page0page.c index 4212df7a631..543cf9e34eb 100644 --- a/storage/innobase/page/page0page.c +++ b/storage/innobase/page/page0page.c @@ -209,6 +209,18 @@ page_set_max_trx_id( } } +/***************************************************************** +Calculates free space if a page is emptied. */ + +ulint +page_get_free_space_of_empty_noninline( +/*===================================*/ + /* out: free space */ + ulint comp) /* in: nonzero=compact page format */ +{ + return(page_get_free_space_of_empty(comp)); +} + /**************************************************************** Allocates a block of memory from an index page. */ diff --git a/storage/innobase/pars/Makefile.am b/storage/innobase/pars/Makefile.am deleted file mode 100644 index b10796c3d5e..00000000000 --- a/storage/innobase/pars/Makefile.am +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (C) 2001, 2003 MySQL AB & Innobase Oy -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
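page_get_free_space_of_empty_noninline(), added above, follows a common InnoDB convention: the real function lives as an inline in an .ic header, and modules that cannot include those headers get a plain exported wrapper. A generic sketch of the pattern; the names and the returned numbers are illustrative only:

/* In the header: the fast path, inlined into internal callers. */
static inline unsigned long
free_space_of_empty(unsigned long comp)	/* nonzero=compact format */
{
	/* illustrative numbers only, not InnoDB's real header sizes */
	return(16384UL - (comp ? 120UL : 114UL));
}

/* In a .c file: exported, non-inline entry point for callers that
must not (or cannot) include the inline definitions. */
unsigned long
free_space_of_empty_noninline(unsigned long comp)
{
	return(free_space_of_empty(comp));
}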
-# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -include ../include/Makefile.i - -noinst_LIBRARIES = libpars.a - -noinst_HEADERS = pars0grm.h - -libpars_a_SOURCES = pars0grm.c lexyy.c pars0opt.c pars0pars.c pars0sym.c - -EXTRA_PROGRAMS = - -# Don't update the files from bitkeeper -%::SCCS/s.% diff --git a/storage/innobase/pars/lexyy.c b/storage/innobase/pars/lexyy.c index 70daf261186..b65de138573 100644 --- a/storage/innobase/pars/lexyy.c +++ b/storage/innobase/pars/lexyy.c @@ -1017,7 +1017,7 @@ YY_RULE_SETUP yylval = sym_tab_add_bound_lit(pars_sym_tab_global, yytext + 1, &type); - return(type); + return((int) type); } YY_BREAK case 4: diff --git a/storage/innobase/pars/pars0lex.l b/storage/innobase/pars/pars0lex.l index 71ac4c98267..ad65034fab0 100644 --- a/storage/innobase/pars/pars0lex.l +++ b/storage/innobase/pars/pars0lex.l @@ -109,7 +109,7 @@ BOUND_ID \$[a-z_A-Z0-9]+ yylval = sym_tab_add_bound_lit(pars_sym_tab_global, yytext + 1, &type); - return(type); + return((int) type); } {BOUND_ID} { diff --git a/storage/innobase/pars/pars0pars.c b/storage/innobase/pars/pars0pars.c index 6861844870c..89f6f862995 100644 --- a/storage/innobase/pars/pars0pars.c +++ b/storage/innobase/pars/pars0pars.c @@ -1640,7 +1640,8 @@ pars_create_table( while (column) { dtype = dfield_get_type(que_node_get_val(column)); - dict_mem_table_add_col(table, column->name, dtype->mtype, + dict_mem_table_add_col(table, table->heap, + column->name, dtype->mtype, dtype->prtype, dtype->len); column->resolved = TRUE; column->token_type = SYM_COLUMN; @@ -1859,10 +1860,9 @@ pars_sql( heap = mem_heap_create(256); -#ifdef UNIV_SYNC_DEBUG /* Currently, the parser is not reentrant: */ ut_ad(mutex_own(&(dict_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ + pars_sym_tab_global = sym_tab_create(heap); pars_sym_tab_global->string_len = strlen(str); diff --git a/storage/innobase/plug.in b/storage/innobase/plug.in index 9c21a491d9f..b252d471fba 100644 --- a/storage/innobase/plug.in +++ b/storage/innobase/plug.in @@ -2,13 +2,10 @@ MYSQL_STORAGE_ENGINE(innobase, innodb, [InnoDB Storage Engine], [Transactional Tables using InnoDB], [max,max-no-ndb]) MYSQL_PLUGIN_DIRECTORY(innobase, [storage/innobase]) MYSQL_PLUGIN_STATIC(innobase, [libinnobase.a]) +MYSQL_PLUGIN_DYNAMIC(innobase, [ha_innodb.la]) MYSQL_PLUGIN_ACTIONS(innobase, [ AC_CHECK_LIB(rt, aio_read, [innodb_system_libs="-lrt"]) AC_SUBST(innodb_system_libs) - AC_PROG_CC - AC_PROG_RANLIB - AC_PROG_INSTALL - AC_PROG_LIBTOOL AC_CHECK_HEADERS(aio.h sched.h) AC_CHECK_SIZEOF(int, 4) AC_CHECK_SIZEOF(long, 4) @@ -30,43 +27,13 @@ MYSQL_PLUGIN_ACTIONS(innobase, [ CFLAGS="$CFLAGS -DUNIV_MUST_NOT_INLINE";; osf*) CFLAGS="$CFLAGS -DUNIV_MUST_NOT_INLINE";; + *solaris*|*SunOS*) + CFLAGS="$CFLAGS -DUNIV_SOLARIS";; sysv5uw7*) # Problem when linking on SCO CFLAGS="$CFLAGS -DUNIV_MUST_NOT_INLINE";; openbsd*) CFLAGS="$CFLAGS -DUNIV_MUST_NOT_INLINE";; esac - AC_CONFIG_FILES( - storage/innobase/ut/Makefile - storage/innobase/btr/Makefile - storage/innobase/buf/Makefile - storage/innobase/data/Makefile - storage/innobase/dict/Makefile - storage/innobase/dyn/Makefile - storage/innobase/eval/Makefile - storage/innobase/fil/Makefile - storage/innobase/fsp/Makefile - storage/innobase/fut/Makefile - storage/innobase/ha/Makefile - storage/innobase/ibuf/Makefile - storage/innobase/lock/Makefile - storage/innobase/log/Makefile - 
storage/innobase/mach/Makefile - storage/innobase/mem/Makefile - storage/innobase/mtr/Makefile - storage/innobase/os/Makefile - storage/innobase/page/Makefile - storage/innobase/pars/Makefile - storage/innobase/que/Makefile - storage/innobase/read/Makefile - storage/innobase/rem/Makefile - storage/innobase/row/Makefile - storage/innobase/srv/Makefile - storage/innobase/sync/Makefile - storage/innobase/thr/Makefile - storage/innobase/trx/Makefile - storage/innobase/handler/Makefile - storage/innobase/usr/Makefile) ]) -MYSQL_PLUGIN_DEPENDS_ON_MYSQL_INTERNALS(innobase, [handler/ha_innodb.cc]) diff --git a/storage/innobase/que/Makefile.am b/storage/innobase/que/Makefile.am deleted file mode 100644 index 73f3fb07af4..00000000000 --- a/storage/innobase/que/Makefile.am +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (C) 2001, 2003 MySQL AB & Innobase Oy -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -include ../include/Makefile.i - -noinst_LIBRARIES = libque.a - -libque_a_SOURCES = que0que.c - -EXTRA_PROGRAMS = - -# Don't update the files from bitkeeper -%::SCCS/s.% diff --git a/storage/innobase/que/que0que.c b/storage/innobase/que/que0que.c index b2663e30879..bf83f28f04e 100644 --- a/storage/innobase/que/que0que.c +++ b/storage/innobase/que/que0que.c @@ -127,9 +127,7 @@ que_graph_publish( que_t* graph, /* in: graph */ sess_t* sess) /* in: session */ { -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ UT_LIST_ADD_LAST(graphs, sess->graphs, graph); } @@ -238,9 +236,7 @@ que_thr_end_wait( { ibool was_active; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ ut_ad(thr); ut_ad((thr->state == QUE_THR_LOCK_WAIT) || (thr->state == QUE_THR_PROCEDURE_WAIT) @@ -280,9 +276,7 @@ que_thr_end_wait_no_next_thr( ut_a(thr->state == QUE_THR_LOCK_WAIT); /* In MySQL this is the only possible state here */ -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ ut_ad(thr); ut_ad((thr->state == QUE_THR_LOCK_WAIT) || (thr->state == QUE_THR_PROCEDURE_WAIT) @@ -335,6 +329,8 @@ que_fork_start_command( que_fork_t* fork) /* in: a query fork */ { que_thr_t* thr; + que_thr_t* suspended_thr = NULL; + que_thr_t* completed_thr = NULL; fork->state = QUE_FORK_ACTIVE; @@ -344,14 +340,18 @@ que_fork_start_command( but in a parallelized select, which necessarily is non-scrollable, there may be several to choose from */ - /*--------------------------------------------------------------- - First we try to find a query thread in the QUE_THR_COMMAND_WAIT state - */ + /* First we try to find a query thread in the QUE_THR_COMMAND_WAIT + state. 
Then we try to find a query thread in the QUE_THR_SUSPENDED + state, finally we try to find a query thread in the QUE_THR_COMPLETED + state */ thr = UT_LIST_GET_FIRST(fork->thrs); - while (thr != NULL) { - if (thr->state == QUE_THR_COMMAND_WAIT) { + /* We make a single pass over the thr list within which we note which + threads are ready to run. */ + while (thr) { + switch (thr->state) { + case QUE_THR_COMMAND_WAIT: /* We have to send the initial message to query thread to start it */ @@ -359,49 +359,44 @@ que_fork_start_command( que_thr_init_command(thr); return(thr); - } - - ut_ad(thr->state != QUE_THR_LOCK_WAIT); - - thr = UT_LIST_GET_NEXT(thrs, thr); - } - - /*---------------------------------------------------------------- - Then we try to find a query thread in the QUE_THR_SUSPENDED state */ - thr = UT_LIST_GET_FIRST(fork->thrs); - - while (thr != NULL) { - if (thr->state == QUE_THR_SUSPENDED) { + case QUE_THR_SUSPENDED: /* In this case the execution of the thread was suspended: no initial message is needed because execution can continue from where it was left */ + if (!suspended_thr) { + suspended_thr = thr; + } - que_thr_move_to_run_state(thr); + break; + + case QUE_THR_COMPLETED: + if (!completed_thr) { + completed_thr = thr; + } + + break; + + case QUE_THR_LOCK_WAIT: + ut_error; - return(thr); } thr = UT_LIST_GET_NEXT(thrs, thr); } - /*----------------------------------------------------------------- - Then we try to find a query thread in the QUE_THR_COMPLETED state */ + if (suspended_thr) { - thr = UT_LIST_GET_FIRST(fork->thrs); + thr = suspended_thr; + que_thr_move_to_run_state(thr); - while (thr != NULL) { - if (thr->state == QUE_THR_COMPLETED) { - que_thr_init_command(thr); + } else if (completed_thr) { - return(thr); - } - - thr = UT_LIST_GET_NEXT(thrs, thr); + thr = completed_thr; + que_thr_init_command(thr); } - /* Else we return NULL */ - return(NULL); + return(thr); } /************************************************************************** @@ -418,9 +413,7 @@ que_fork_error_handle( { que_thr_t* thr; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ ut_ad(trx->sess->state == SESS_ERROR); ut_ad(UT_LIST_GET_LEN(trx->reply_signals) == 0); ut_ad(UT_LIST_GET_LEN(trx->wait_thrs) == 0); @@ -697,9 +690,7 @@ que_graph_try_free( { sess_t* sess; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ sess = (graph->trx)->sess; @@ -930,9 +921,7 @@ que_thr_stop( que_t* graph; ibool ret = TRUE; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ graph = thr->graph; trx = graph->trx; @@ -1308,10 +1297,7 @@ que_run_threads_low( ut_ad(thr->state == QUE_THR_RUNNING); ut_a(thr_get_trx(thr)->error_state == DB_SUCCESS); - -#ifdef UNIV_SYNC_DEBUG ut_ad(!mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ /* cumul_resource counts how much resources the OS thread (NOT the query thread) has spent in this function */ diff --git a/storage/innobase/read/Makefile.am b/storage/innobase/read/Makefile.am deleted file mode 100644 index 1e56a9716c3..00000000000 --- a/storage/innobase/read/Makefile.am +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (C) 2001, 2003 MySQL AB & Innobase Oy -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. 
-# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -include ../include/Makefile.i - -noinst_LIBRARIES = libread.a - -libread_a_SOURCES = read0read.c - -EXTRA_PROGRAMS = - -# Don't update the files from bitkeeper -%::SCCS/s.% diff --git a/storage/innobase/read/read0read.c b/storage/innobase/read/read0read.c index 2375c35190a..10a6e07e96a 100644 --- a/storage/innobase/read/read0read.c +++ b/storage/innobase/read/read0read.c @@ -162,9 +162,8 @@ read_view_oldest_copy_or_open_new( ulint n; ulint i; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ + old_view = UT_LIST_GET_LAST(trx_sys->view_list); if (old_view == NULL) { @@ -245,9 +244,9 @@ read_view_open_now( read_view_t* view; trx_t* trx; ulint n; -#ifdef UNIV_SYNC_DEBUG + ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ + view = read_view_create_low(UT_LIST_GET_LEN(trx_sys->trx_list), heap); view->creator_trx_id = cr_trx_id; @@ -313,9 +312,8 @@ read_view_close( /*============*/ read_view_t* view) /* in: read view */ { -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ + UT_LIST_REMOVE(view_list, trx_sys->view_list, view); } diff --git a/storage/innobase/rem/Makefile.am b/storage/innobase/rem/Makefile.am deleted file mode 100644 index 1026172b815..00000000000 --- a/storage/innobase/rem/Makefile.am +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (C) 2001, 2003 MySQL AB & Innobase Oy -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -include ../include/Makefile.i - -noinst_LIBRARIES = librem.a - -librem_a_SOURCES = rem0rec.c rem0cmp.c - -EXTRA_PROGRAMS = - -# Don't update the files from bitkeeper -%::SCCS/s.% diff --git a/storage/innobase/rem/rem0cmp.c b/storage/innobase/rem/rem0cmp.c index 07e5b64c157..ca0ec663548 100644 --- a/storage/innobase/rem/rem0cmp.c +++ b/storage/innobase/rem/rem0cmp.c @@ -597,7 +597,7 @@ cmp_dtuple_rec_with_match( dtuple_byte = cmp_collate(dtuple_byte); } - ret = dtuple_byte - rec_byte; + ret = (int) (dtuple_byte - rec_byte); if (UNIV_UNLIKELY(ret)) { if (ret < 0) { ret = -1; diff --git a/storage/innobase/rem/rem0rec.c b/storage/innobase/rem/rem0rec.c index 549b5ee8b28..64f8e2d319c 100644 --- a/storage/innobase/rem/rem0rec.c +++ b/storage/innobase/rem/rem0rec.c @@ -153,7 +153,6 @@ static void rec_init_offsets( /*=============*/ - /* out: the offsets */ rec_t* rec, /* in: physical record */ dict_index_t* index, /* in: record descriptor */ ulint* offsets)/* in/out: array of offsets; @@ -189,7 +188,7 @@ rec_init_offsets( } nulls = rec - (REC_N_NEW_EXTRA_BYTES + 1); - lens = nulls - (index->n_nullable + 7) / 8; + lens = nulls - UT_BITS_IN_BYTES(index->n_nullable); offs = 0; null_mask = 1; @@ -304,7 +303,7 @@ rec_get_offsets_func( /* out: the new offsets */ rec_t* rec, /* in: physical record */ dict_index_t* index, /* in: record descriptor */ - ulint* offsets,/* in: array consisting of offsets[0] + ulint* offsets,/* in/out: array consisting of offsets[0] allocated elements, or an array from rec_get_offsets(), or NULL */ ulint n_fields,/* in: maximum number of initialized fields @@ -440,7 +439,7 @@ rec_get_converted_size_new( dtuple_t* dtuple) /* in: data tuple */ { ulint size = REC_N_NEW_EXTRA_BYTES - + (index->n_nullable + 7) / 8; + + UT_BITS_IN_BYTES(index->n_nullable); ulint i; ulint n_fields; ut_ad(index && dtuple); @@ -459,10 +458,10 @@ rec_get_converted_size_new( break; case REC_STATUS_INFIMUM: case REC_STATUS_SUPREMUM: - /* infimum or supremum record, 8 bytes */ - return(size + 8); /* no extra data needed */ + /* infimum or supremum record, 8 data bytes */ + return(REC_N_NEW_EXTRA_BYTES + 8); default: - ut_a(0); + ut_error; return(ULINT_UNDEFINED); } @@ -476,21 +475,31 @@ rec_get_converted_size_new( len = dtuple_get_nth_field(dtuple, i)->len; col = dict_field_get_col(field); - ut_ad(len != UNIV_SQL_NULL || !(col->prtype & DATA_NOT_NULL)); + ut_ad(dict_col_type_assert_equal( + col, dfield_get_type(dtuple_get_nth_field( + dtuple, i)))); if (len == UNIV_SQL_NULL) { /* No length is stored for NULL fields. */ + ut_ad(!(col->prtype & DATA_NOT_NULL)); continue; } ut_ad(len <= col->len || col->mtype == DATA_BLOB); - ut_ad(!field->fixed_len || len == field->fixed_len); if (field->fixed_len) { + ut_ad(len == field->fixed_len); + /* dict_index_add_col() should guarantee this */ + ut_ad(!field->prefix_len + || field->fixed_len == field->prefix_len); } else if (len < 128 || (col->len < 256 && col->mtype != DATA_BLOB)) { size++; } else { + /* For variable-length columns, we look up the + maximum length from the column itself. If this + is a prefix index column shorter than 256 bytes, + this will waste one byte. 
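The rec_get_converted_size_new() comment above is the key to the compact-format length encoding: a variable-length value gets one length byte when it is shorter than 128 bytes, or when the column's declared maximum is under 256 bytes and it is not a BLOB; otherwise it gets two. A standalone sketch of that rule, together with the UT_BITS_IN_BYTES-style rounding used for the NULL bitmap:

#include <stddef.h>

/* same rounding as UT_BITS_IN_BYTES */
#define BITS_IN_BYTES(n)	(((n) + 7) / 8)

/* Length bytes needed for one non-NULL variable-length value
(assumed simplification of the rule described above). */
static size_t
len_bytes(size_t value_len, size_t col_max_len, int is_blob)
{
	if (value_len < 128 || (col_max_len < 256 && !is_blob)) {
		return(1);
	}
	return(2);	/* may waste a byte for short prefix columns */
}

/* NULL-flag bitmap size for a record with n nullable columns. */
static size_t
null_bitmap_bytes(size_t n_nullable)
{
	return(BITS_IN_BYTES(n_nullable));
}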
*/ size += 2; } size += len; @@ -586,7 +595,7 @@ rec_set_nth_field_extern_bit_new( we do not write to log about the change */ { byte* nulls = rec - (REC_N_NEW_EXTRA_BYTES + 1); - byte* lens = nulls - (index->n_nullable + 7) / 8; + byte* lens = nulls - UT_BITS_IN_BYTES(index->n_nullable); ulint i; ulint n_fields; ulint null_mask = 1; @@ -744,7 +753,11 @@ rec_convert_dtuple_to_rec_old( /* Calculate the offset of the origin in the physical record */ rec = buf + rec_get_converted_extra_size(data_size, n_fields); - +#ifdef UNIV_DEBUG + /* Suppress Valgrind warnings of ut_ad() + in mach_write_to_1(), mach_write_to_2() et al. */ + memset(buf, 0xff, rec - buf + data_size); +#endif /* UNIV_DEBUG */ /* Store the number of fields */ rec_set_n_fields_old(rec, n_fields); @@ -875,7 +888,7 @@ rec_convert_dtuple_to_rec_new( /* Calculate the offset of the origin in the physical record. We must loop over all fields to do this. */ - rec += (index->n_nullable + 7) / 8; + rec += UT_BITS_IN_BYTES(index->n_nullable); for (i = 0; i < n_fields; i++) { if (UNIV_UNLIKELY(i == n_node_ptr_field)) { @@ -892,6 +905,11 @@ rec_convert_dtuple_to_rec_new( len = dfield_get_len(field); fixed_len = dict_index_get_nth_field(index, i)->fixed_len; + ut_ad(dict_col_type_assert_equal( + dict_field_get_col(dict_index_get_nth_field( + index, i)), + dfield_get_type(field))); + if (!(dtype_get_prtype(type) & DATA_NOT_NULL)) { if (len == UNIV_SQL_NULL) continue; @@ -915,7 +933,7 @@ rec_convert_dtuple_to_rec_new( init: end = rec; nulls = rec - (REC_N_NEW_EXTRA_BYTES + 1); - lens = nulls - (index->n_nullable + 7) / 8; + lens = nulls - UT_BITS_IN_BYTES(index->n_nullable); /* clear the SQL-null flags */ memset (lens + 1, 0, nulls - lens); @@ -1172,7 +1190,7 @@ rec_copy_prefix_to_buf( } nulls = rec - (REC_N_NEW_EXTRA_BYTES + 1); - lens = nulls - (index->n_nullable + 7) / 8; + lens = nulls - UT_BITS_IN_BYTES(index->n_nullable); UNIV_PREFETCH_R(lens); prefix_len = 0; null_mask = 1; diff --git a/storage/innobase/row/Makefile.am b/storage/innobase/row/Makefile.am deleted file mode 100644 index 6c1f960055d..00000000000 --- a/storage/innobase/row/Makefile.am +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright (C) 2001, 2003 MySQL AB & Innobase Oy -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -include ../include/Makefile.i - -noinst_LIBRARIES = librow.a - -librow_a_SOURCES = row0ins.c row0mysql.c row0purge.c row0row.c row0sel.c\ - row0uins.c row0umod.c row0undo.c row0upd.c row0vers.c - -EXTRA_PROGRAMS = - -# Don't update the files from bitkeeper -%::SCCS/s.% diff --git a/storage/innobase/row/row0ins.c b/storage/innobase/row/row0ins.c index 1fba0abcdaf..ad14b927170 100644 --- a/storage/innobase/row/row0ins.c +++ b/storage/innobase/row/row0ins.c @@ -51,21 +51,6 @@ innobase_invalidate_query_cache( ulint full_name_len); /* in: full name length where also the null chars count */ -/********************************************************************** -This function returns true if - -1) SQL-query in the current thread -is either REPLACE or LOAD DATA INFILE REPLACE. - -2) SQL-query in the current thread -is INSERT ON DUPLICATE KEY UPDATE. - -NOTE that /mysql/innobase/row/row0ins.c must contain the -prototype for this function ! */ - -ibool -innobase_query_is_update(void); - /************************************************************************* Creates an insert node struct. */ @@ -448,7 +433,11 @@ row_ins_cascade_calc_update_vec( ulint i; ulint j; - ut_a(node && foreign && cascade && table && index); + ut_a(node); + ut_a(foreign); + ut_a(cascade); + ut_a(table); + ut_a(index); /* Calculate the appropriate update vector which will set the fields in the child index record to the same value (possibly padded with @@ -791,7 +780,10 @@ row_ins_foreign_check_on_constraint( trx_t* trx; mem_heap_t* tmp_heap = NULL; - ut_a(thr && foreign && pcur && mtr); + ut_a(thr); + ut_a(foreign); + ut_a(pcur); + ut_a(mtr); trx = thr_get_trx(thr); @@ -1308,7 +1300,8 @@ run_again: goto exit_func; } - ut_a(check_table && check_index); + ut_a(check_table); + ut_a(check_index); if (check_table != table) { /* We already have a LOCK_IX on table, but not necessarily @@ -1336,11 +1329,9 @@ run_again: /* Scan index records and check if there is a matching record */ for (;;) { - page_t* page; rec = btr_pcur_get_rec(&pcur); - page = buf_frame_align(rec); - if (rec == page_get_infimum_rec(page)) { + if (page_rec_is_infimum(rec)) { goto next_rec; } @@ -1348,7 +1339,7 @@ run_again: offsets = rec_get_offsets(rec, check_index, offsets, ULINT_UNDEFINED, &heap); - if (rec == page_get_supremum_rec(page)) { + if (page_rec_is_supremum(rec)) { err = row_ins_set_shared_rec_lock( LOCK_ORDINARY, rec, check_index, offsets, thr); @@ -1654,6 +1645,7 @@ row_ins_scan_sec_index_for_duplicate( btr_pcur_t pcur; ulint err = DB_SUCCESS; ibool moved; + unsigned allow_duplicates; mtr_t mtr; mem_heap_t* heap = NULL; ulint offsets_[REC_OFFS_NORMAL_SIZE]; @@ -1684,12 +1676,14 @@ row_ins_scan_sec_index_for_duplicate( btr_pcur_open(index, entry, PAGE_CUR_GE, BTR_SEARCH_LEAF, &pcur, &mtr); + allow_duplicates = thr_get_trx(thr)->duplicates & TRX_DUP_IGNORE; + /* Scan index records and check if there is a duplicate */ for (;;) { rec = btr_pcur_get_rec(&pcur); - if (rec == page_get_infimum_rec(buf_frame_align(rec))) { + if (page_rec_is_infimum(rec)) { goto next_rec; } @@ -1697,7 +1691,7 @@ row_ins_scan_sec_index_for_duplicate( offsets = rec_get_offsets(rec, index, offsets, ULINT_UNDEFINED, &heap); - if (innobase_query_is_update()) { + if (allow_duplicates) { /* If the SQL-query will update or replace duplicate key we will take X-lock for @@ 
-1826,7 +1820,7 @@ row_ins_duplicate_error_in_clust( sure that in roll-forward we get the same duplicate errors as in original execution */ - if (innobase_query_is_update()) { + if (trx->duplicates & TRX_DUP_IGNORE) { /* If the SQL-query will update or replace duplicate key we will take X-lock for @@ -1864,7 +1858,7 @@ row_ins_duplicate_error_in_clust( offsets = rec_get_offsets(rec, cursor->index, offsets, ULINT_UNDEFINED, &heap); - if (innobase_query_is_update()) { + if (trx->duplicates & TRX_DUP_IGNORE) { /* If the SQL-query will update or replace duplicate key we will take X-lock for diff --git a/storage/innobase/row/row0mysql.c b/storage/innobase/row/row0mysql.c index 6779f536daa..b8d201e3da2 100644 --- a/storage/innobase/row/row0mysql.c +++ b/storage/innobase/row/row0mysql.c @@ -476,7 +476,8 @@ handle_new_error: /* MySQL will roll back the latest SQL statement */ } else if (err == DB_ROW_IS_REFERENCED || err == DB_NO_REFERENCED_ROW - || err == DB_CANNOT_ADD_CONSTRAINT) { + || err == DB_CANNOT_ADD_CONSTRAINT + || err == DB_TOO_MANY_CONCURRENT_TRXS) { if (savept) { /* Roll back the latest, possibly incomplete insertion or update */ @@ -654,6 +655,8 @@ row_create_prebuilt( prebuilt->old_vers_heap = NULL; + prebuilt->last_value = 0; + return(prebuilt); } @@ -1748,8 +1751,8 @@ row_create_table_for_mysql( ut_ad(trx->mysql_thread_id == os_thread_get_curr_id()); #ifdef UNIV_SYNC_DEBUG ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_EX)); - ut_ad(mutex_own(&(dict_sys->mutex))); #endif /* UNIV_SYNC_DEBUG */ + ut_ad(mutex_own(&(dict_sys->mutex))); ut_ad(trx->dict_operation_lock_mode == RW_X_LATCH); if (srv_created_new_raw) { @@ -1964,8 +1967,8 @@ row_create_index_for_mysql( #ifdef UNIV_SYNC_DEBUG ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_EX)); - ut_ad(mutex_own(&(dict_sys->mutex))); #endif /* UNIV_SYNC_DEBUG */ + ut_ad(mutex_own(&(dict_sys->mutex))); ut_ad(trx->mysql_thread_id == os_thread_get_curr_id()); trx->op_info = "creating index"; @@ -2080,8 +2083,8 @@ row_table_add_foreign_constraints( { ulint err; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(dict_sys->mutex))); +#ifdef UNIV_SYNC_DEBUG ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_EX)); #endif /* UNIV_SYNC_DEBUG */ ut_a(sql_string); @@ -2246,9 +2249,7 @@ row_get_background_drop_list_len_low(void) /*======================================*/ /* out: how many tables in list */ { -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ if (!row_mysql_drop_list_inited) { @@ -2726,8 +2727,8 @@ row_truncate_table_for_mysql( row_mysql_lock_data_dictionary(trx); -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(dict_sys->mutex))); +#ifdef UNIV_SYNC_DEBUG ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_EX)); #endif /* UNIV_SYNC_DEBUG */ @@ -2895,6 +2896,8 @@ next_rec: dict_table_change_id_in_cache(table, new_id); } + /* MySQL calls ha_innobase::reset_auto_increment() which does + the same thing. 
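In the row0ins.c hunks above, the callback into the server layer (innobase_query_is_update()) is replaced by flags carried on the transaction itself: statements that overwrite duplicates set TRX_DUP_IGNORE or TRX_DUP_REPLACE, and the insert path keys its record-lock choice off that word. A sketch of the flag test; the bit values are assumed here, only the bitwise usage mirrors the patch:

/* Assumed bit values; only the bitwise usage mirrors the patch. */
#define TRX_DUP_IGNORE	1U	/* duplicates are updated/ignored */
#define TRX_DUP_REPLACE	2U	/* duplicates are replaced */

enum lock_mode { LOCK_S, LOCK_X };

struct trx {
	unsigned	duplicates;	/* set by the server layer */
};

/* If the statement will overwrite duplicates, take an exclusive lock
on the conflicting record up front; otherwise a shared lock suffices
to report a consistent duplicate-key error. */
static enum lock_mode
dup_check_lock_mode(const struct trx* trx)
{
	return((trx->duplicates & TRX_DUP_IGNORE) ? LOCK_X : LOCK_S);
}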
*/ dict_table_autoinc_initialize(table, 0); dict_update_statistics(table); @@ -3001,8 +3004,8 @@ row_drop_table_for_mysql( locked_dictionary = TRUE; } -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(dict_sys->mutex))); +#ifdef UNIV_SYNC_DEBUG ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_EX)); #endif /* UNIV_SYNC_DEBUG */ @@ -3423,7 +3426,7 @@ row_delete_constraint_low( pars_info_add_str_literal(info, "id", id); - return(que_eval_sql(info, + return((int) que_eval_sql(info, "PROCEDURE DELETE_CONSTRAINT () IS\n" "BEGIN\n" "DELETE FROM SYS_FOREIGN_COLS WHERE ID = :id;\n" @@ -3462,7 +3465,7 @@ row_delete_constraint( err = row_delete_constraint_low(id, trx); } - return(err); + return((int) err); } /************************************************************************* diff --git a/storage/innobase/row/row0row.c b/storage/innobase/row/row0row.c index efa129d6211..08e50817db9 100644 --- a/storage/innobase/row/row0row.c +++ b/storage/innobase/row/row0row.c @@ -142,20 +142,15 @@ row_build_index_entry( dfield_copy(dfield, dfield2); /* If a column prefix index, take only the prefix */ - if (ind_field->prefix_len) { - if (dfield_get_len(dfield2) != UNIV_SQL_NULL) { + if (ind_field->prefix_len > 0 + && dfield_get_len(dfield2) != UNIV_SQL_NULL) { - storage_len = dtype_get_at_most_n_mbchars( - col->prtype, - col->mbminlen, col->mbmaxlen, - ind_field->prefix_len, - dfield_get_len(dfield2), - dfield2->data); - - dfield_set_len(dfield, storage_len); - } + storage_len = dtype_get_at_most_n_mbchars( + col->prtype, col->mbminlen, col->mbmaxlen, + ind_field->prefix_len, + dfield_get_len(dfield2), dfield2->data); - dfield_get_type(dfield)->len = ind_field->prefix_len; + dfield_set_len(dfield, storage_len); } } @@ -478,7 +473,9 @@ row_build_row_ref_in_tuple( ulint* offsets = offsets_; *offsets_ = (sizeof offsets_) / sizeof *offsets_; - ut_a(ref && index && rec); + ut_a(ref); + ut_a(index); + ut_a(rec); if (UNIV_UNLIKELY(!index->table)) { fputs("InnoDB: table ", stderr); diff --git a/storage/innobase/row/row0sel.c b/storage/innobase/row/row0sel.c index bee9f1472ce..fdf6aa46351 100644 --- a/storage/innobase/row/row0sel.c +++ b/storage/innobase/row/row0sel.c @@ -2116,7 +2116,7 @@ row_fetch_store_uint4( ut_a(len == 4); tmp = mach_read_from_4(dfield_get_data(dfield)); - *val = tmp; + *val = (ib_uint32_t) tmp; return(NULL); } @@ -3619,6 +3619,32 @@ shortcut_fails_too_big_rec: pcur, 0, &mtr); pcur->trx_if_known = trx; + + rec = btr_pcur_get_rec(pcur); + + if (!moves_up + && !page_rec_is_supremum(rec) + && set_also_gap_locks + && !(srv_locks_unsafe_for_binlog + || trx->isolation_level == TRX_ISO_READ_COMMITTED) + && prebuilt->select_lock_type != LOCK_NONE) { + + /* Try to place a gap lock on the next index record + to prevent phantoms in ORDER BY ... DESC queries */ + + offsets = rec_get_offsets(page_rec_get_next(rec), + index, offsets, + ULINT_UNDEFINED, &heap); + err = sel_set_rec_lock(page_rec_get_next(rec), + index, offsets, + prebuilt->select_lock_type, + LOCK_GAP, thr); + + if (err != DB_SUCCESS) { + + goto lock_wait_or_error; + } + } } else { if (mode == PAGE_CUR_G) { btr_pcur_open_at_index_side( @@ -4493,3 +4519,149 @@ row_search_check_if_query_cache_permitted( return(ret); } + +/*********************************************************************** +Read the AUTOINC column from the current row. 
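row_search_autoinc_read_column(), defined just below, reconstructs a 64-bit value from a column stored big-endian with an inverted sign bit (InnoDB flips the sign bit of signed integers so that memcmp() orders them correctly). A standalone sketch of that decode step, assuming the same on-disk convention:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Decode an n-byte big-endian integer column into a 64-bit value,
restoring the sign bit that the storage format inverts. Sketch only. */
static int64_t
decode_int_col(const unsigned char* data, size_t len, int is_unsigned)
{
	unsigned char	dest[8];
	uint64_t	v = 0;
	size_t		i;

	/* sign-extend: the stored high bit is 1 for non-negative
	signed values (and always meaningful for unsigned ones) */
	memset(dest, (is_unsigned || (data[0] & 0x80)) ? 0x00 : 0xff,
	       sizeof dest);
	memcpy(dest + (sizeof dest - len), data, len);

	if (!is_unsigned) {
		dest[sizeof dest - len] ^= 0x80; /* undo the inversion */
	}

	for (i = 0; i < sizeof dest; i++) {
		v = (v << 8) | dest[i];
	}

	return((int64_t) v);
}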
*/ +static +ib_longlong +row_search_autoinc_read_column( +/*===========================*/ + /* out: value read from the column */ + dict_index_t* index, /* in: index to read from */ + const rec_t* rec, /* in: current rec */ + ulint col_no, /* in: column number */ + ibool unsigned_type) /* in: signed or unsigned flag */ +{ + ulint len; + const byte* data; + ib_longlong value; + mem_heap_t* heap = NULL; + byte dest[sizeof(value)]; + ulint offsets_[REC_OFFS_NORMAL_SIZE]; + ulint* offsets = offsets_; + + *offsets_ = sizeof offsets_ / sizeof *offsets_; + + /* TODO: We have to cast away the const of rec for now. This needs + to be fixed later.*/ + offsets = rec_get_offsets( + (rec_t*) rec, index, offsets, ULINT_UNDEFINED, &heap); + + /* TODO: We have to cast away the const of rec for now. This needs + to be fixed later.*/ + data = rec_get_nth_field((rec_t*)rec, offsets, col_no, &len); + + ut_a(len != UNIV_SQL_NULL); + ut_a(len <= sizeof value); + + /* Copy integer data and restore sign bit */ + if (unsigned_type || (data[0] & 128)) + memset(dest, 0x00, sizeof(dest)); + else + memset(dest, 0xff, sizeof(dest)); + + memcpy(dest + (sizeof(value) - len), data, len); + + if (!unsigned_type) + dest[sizeof(value) - len] ^= 128; + + /* The assumption here is that the AUTOINC value can't be negative.*/ + value = (((ib_longlong) mach_read_from_4(dest)) << 32) | + ((ib_longlong) mach_read_from_4(dest + 4)); + + if (UNIV_LIKELY_NULL(heap)) { + mem_heap_free(heap); + } + + ut_a(value >= 0); + + return(value); +} + +/*********************************************************************** +Get the last row. */ +static +const rec_t* +row_search_autoinc_get_rec( +/*=======================*/ + /* out: current rec or NULL */ + btr_pcur_t* pcur, /* in: the current cursor */ + mtr_t* mtr) /* in: mini transaction */ +{ + do { + const rec_t* rec = btr_pcur_get_rec(pcur); + + if (page_rec_is_user_rec(rec)) { + return(rec); + } + } while (btr_pcur_move_to_prev(pcur, mtr)); + + return(NULL); +} + +/*********************************************************************** +Read the max AUTOINC value from an index. 
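row_search_max_autoinc(), defined just below, is intended for the handler's open path: position a cursor at the right-hand end of the autoinc index, step back over infimum/supremum to the last user record, and read the column. A hedged usage sketch; the seeding helper and the DB_SUCCESS value are assumptions, only the function signature comes from the patch:

#include <stddef.h>

typedef unsigned long	ulint;
typedef long long	ib_longlong;
typedef struct dict_index_struct dict_index_t;

#define DB_SUCCESS	10	/* assumed numeric value; symbol is real */

/* declared by the patch (definition follows below) */
ulint row_search_max_autoinc(dict_index_t* index, const char* col_name,
			     ib_longlong* value);

/* Hypothetical open-path helper: compute the first value the table's
in-memory autoinc counter should hand out. */
static ib_longlong
table_autoinc_seed(dict_index_t* index, const char* autoinc_col)
{
	ib_longlong	max_value = 0;

	if (row_search_max_autoinc(index, autoinc_col, &max_value)
	    != DB_SUCCESS) {
		return(1);	/* column not found in index: start at 1 */
	}

	return(max_value + 1);
}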
*/ + +ulint +row_search_max_autoinc( +/*===================*/ + /* out: DB_SUCCESS if all OK else + error code, DB_RECORD_NOT_FOUND if + column name can't be found in index */ + dict_index_t* index, /* in: index to search */ + const char* col_name, /* in: name of autoinc column */ + ib_longlong* value) /* out: AUTOINC value read */ +{ + ulint i; + ulint n_cols; + dict_field_t* dfield = NULL; + ulint error = DB_SUCCESS; + + n_cols = dict_index_get_n_ordering_defined_by_user(index); + + /* Search the index for the AUTOINC column name */ + for (i = 0; i < n_cols; ++i) { + dfield = dict_index_get_nth_field(index, i); + + if (strcmp(col_name, dfield->name) == 0) { + break; + } + } + + *value = 0; + + /* Must find the AUTOINC column name */ + if (i < n_cols && dfield) { + mtr_t mtr; + btr_pcur_t pcur; + + mtr_start(&mtr); + + /* Open at the high/right end (FALSE), and INIT + cursor (TRUE) */ + btr_pcur_open_at_index_side( + FALSE, index, BTR_SEARCH_LEAF, &pcur, TRUE, &mtr); + + if (page_get_n_recs(btr_pcur_get_page(&pcur)) > 0) { + const rec_t* rec; + + rec = row_search_autoinc_get_rec(&pcur, &mtr); + + if (rec != NULL) { + ibool unsigned_type = ( + dfield->col->prtype & DATA_UNSIGNED); + + *value = row_search_autoinc_read_column( + index, rec, i, unsigned_type); + } + } + + btr_pcur_close(&pcur); + + mtr_commit(&mtr); + } else { + error = DB_RECORD_NOT_FOUND; + } + + return(error); +} diff --git a/storage/innobase/row/row0undo.c b/storage/innobase/row/row0undo.c index 2f04e65e8ee..f03f84ed1b0 100644 --- a/storage/innobase/row/row0undo.c +++ b/storage/innobase/row/row0undo.c @@ -213,7 +213,7 @@ row_undo( ulint err; trx_t* trx; dulint roll_ptr; - ibool froze_data_dict = FALSE; + ibool locked_data_dict; ut_ad(node && thr); @@ -266,13 +266,13 @@ row_undo( /* Prevent DROP TABLE etc. while we are rolling back this row. If we are doing a TABLE CREATE or some other dictionary operation, then we already have dict_operation_lock locked in x-mode. Do not - try to lock again in s-mode, because that would cause a hang. */ + try to lock again, because that would cause a hang. */ - if (trx->dict_operation_lock_mode == 0) { + locked_data_dict = (trx->dict_operation_lock_mode == 0); - row_mysql_freeze_data_dictionary(trx); + if (locked_data_dict) { - froze_data_dict = TRUE; + row_mysql_lock_data_dictionary(trx); } if (node->state == UNDO_NODE_INSERT) { @@ -285,9 +285,9 @@ row_undo( err = row_undo_mod(node, thr); } - if (froze_data_dict) { + if (locked_data_dict) { - row_mysql_unfreeze_data_dictionary(trx); + row_mysql_unlock_data_dictionary(trx); } /* Do some cleanup */ diff --git a/storage/innobase/row/row0vers.c b/storage/innobase/row/row0vers.c index c8b71965f75..03d9a2f1203 100644 --- a/storage/innobase/row/row0vers.c +++ b/storage/innobase/row/row0vers.c @@ -63,8 +63,8 @@ row_vers_impl_x_locked_off_kernel( mtr_t mtr; ulint comp; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); +#ifdef UNIV_SYNC_DEBUG ut_ad(!rw_lock_own(&(purge_sys->latch), RW_LOCK_SHARED)); #endif /* UNIV_SYNC_DEBUG */ diff --git a/storage/innobase/srv/Makefile.am b/storage/innobase/srv/Makefile.am deleted file mode 100644 index e0b5b911b04..00000000000 --- a/storage/innobase/srv/Makefile.am +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (C) 2001, 2003-2004 MySQL AB & Innobase Oy -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. 
-# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -include ../include/Makefile.i - -noinst_LIBRARIES = libsrv.a - -libsrv_a_SOURCES = srv0srv.c srv0que.c srv0start.c - -EXTRA_PROGRAMS = - -# Don't update the files from bitkeeper -%::SCCS/s.% diff --git a/storage/innobase/srv/srv0que.c b/storage/innobase/srv/srv0que.c index 9c261cbb00e..e2b4e217980 100644 --- a/storage/innobase/srv/srv0que.c +++ b/storage/innobase/srv/srv0que.c @@ -82,10 +82,7 @@ srv_que_task_enqueue_low( que_thr_t* thr) /* in: query thread */ { ut_ad(thr); - -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ UT_LIST_ADD_LAST(queue, srv_sys->tasks, thr); diff --git a/storage/innobase/srv/srv0srv.c b/storage/innobase/srv/srv0srv.c index 5d92b913934..82b55789be2 100644 --- a/storage/innobase/srv/srv0srv.c +++ b/storage/innobase/srv/srv0srv.c @@ -47,6 +47,7 @@ Created 10/8/1995 Heikki Tuuri #include "dict0boot.h" #include "srv0start.h" #include "row0mysql.h" +#include "ha_prototypes.h" /* This is set to TRUE if the MySQL user has set it in MySQL; currently affects only FOREIGN KEY definition parsing */ @@ -180,6 +181,16 @@ dulint srv_archive_recovery_limit_lsn; ulint srv_lock_wait_timeout = 1024 * 1024 * 1024; +/* This parameter is used to throttle the number of insert buffers that are +merged in a batch. By increasing this parameter on a faster disk you can +possibly reduce the number of I/O operations performed to complete the +merge operation. The value of this parameter is used as is by the +background loop when the system is idle (low load), on a busy system +the parameter is scaled down by a factor of 4, this is to avoid putting +a heavier load on the I/O sub system. 
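The comment above introduces srv_insert_buffer_batch_size: the idle background loop passes the full value to the merge routine, while merges issued from the busy main loop use a quarter of it to limit the I/O burst. A sketch of that scaling with a stand-in for ibuf_contract_for_n_pages():

typedef unsigned long	ulint;

static ulint	insert_buffer_batch_size = 20;	/* the new default */

/* Stand-in for ibuf_contract_for_n_pages(): returns bytes merged. */
static ulint
merge_n_pages(int sync, ulint n_pages)
{
	(void) sync;
	return(n_pages * 16384);	/* pretend one 16 KiB page each */
}

static ulint
do_ibuf_merge(int system_is_idle)
{
	ulint	n_pages = system_is_idle
		? insert_buffer_batch_size	/* background loop */
		: insert_buffer_batch_size / 4;	/* busy main loop */

	return(merge_n_pages(1 /* sync */, n_pages));
}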
*/ + +ulong srv_insert_buffer_batch_size = 20; + char* srv_file_flush_method_str = NULL; ulint srv_unix_file_flush_method = SRV_UNIX_FDATASYNC; ulint srv_win_file_flush_method = SRV_WIN_IO_UNBUFFERED; @@ -317,6 +328,8 @@ ulint srv_fast_shutdown = 0; /* Generate a innodb_status.<pid> file */ ibool srv_innodb_status = FALSE; +ibool srv_stats_on_metadata = TRUE; + ibool srv_use_doublewrite_buf = TRUE; ibool srv_use_checksums = TRUE; @@ -724,9 +737,7 @@ srv_suspend_thread(void) ulint slot_no; ulint type; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ slot_no = thr_local_get_slot_no(os_thread_get_curr_id()); @@ -778,9 +789,7 @@ srv_release_threads( ut_ad(type >= SRV_WORKER); ut_ad(type <= SRV_MASTER); ut_ad(n > 0); -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ for (i = 0; i < OS_THREAD_MAX_N; i++) { @@ -898,7 +907,7 @@ srv_init(void) /* create dummy table and index for old-style infimum and supremum */ table = dict_mem_table_create("SYS_DUMMY1", DICT_HDR_SPACE, 1, 0); - dict_mem_table_add_col(table, "DUMMY", DATA_CHAR, + dict_mem_table_add_col(table, NULL, NULL, DATA_CHAR, DATA_ENGLISH | DATA_NOT_NULL, 8); srv_sys->dummy_ind1 = dict_mem_index_create( @@ -909,7 +918,7 @@ srv_init(void) /* create dummy table and index for new-style infimum and supremum */ table = dict_mem_table_create("SYS_DUMMY2", DICT_HDR_SPACE, 1, DICT_TF_COMPACT); - dict_mem_table_add_col(table, "DUMMY", DATA_CHAR, + dict_mem_table_add_col(table, NULL, NULL, DATA_CHAR, DATA_ENGLISH | DATA_NOT_NULL, 8); srv_sys->dummy_ind2 = dict_mem_index_create( "SYS_DUMMY2", "SYS_DUMMY2", DICT_HDR_SPACE, 0, 1); @@ -979,6 +988,17 @@ srv_conc_enter_innodb( srv_conc_slot_t* slot = NULL; ulint i; + if (trx->mysql_thd != NULL + && thd_is_replication_slave_thread(trx->mysql_thd)) { + + /* TODO Do something more interesting (based on a config + parameter). 
Some users what to give the replication + thread very low priority, see http://bugs.mysql.com/25078 + This can be done by introducing + innodb_replication_delay(ms) config parameter */ + return; + } + /* If trx has 'free tickets' to enter the engine left, then use one such ticket */ @@ -1019,7 +1039,7 @@ retry: if (!has_slept && !trx->has_search_latch && NULL == UT_LIST_GET_FIRST(trx->trx_locks)) { - has_slept = TRUE; /* We let is sleep only once to avoid + has_slept = TRUE; /* We let it sleep only once to avoid starvation */ srv_conc_n_waiting_threads++; @@ -1132,7 +1152,7 @@ srv_conc_force_enter_innodb( srv_conc_n_threads++; trx->declared_to_be_inside_innodb = TRUE; - trx->n_tickets_to_enter_innodb = 0; + trx->n_tickets_to_enter_innodb = 1; os_fast_mutex_unlock(&srv_conc_mutex); } @@ -1154,6 +1174,12 @@ srv_conc_force_exit_innodb( return; } + if (trx->mysql_thd != NULL + && thd_is_replication_slave_thread(trx->mysql_thd)) { + + return; + } + if (trx->declared_to_be_inside_innodb == FALSE) { return; @@ -1303,9 +1329,7 @@ srv_table_reserve_slot_for_mysql(void) srv_slot_t* slot; ulint i; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ i = 0; slot = srv_mysql_table + i; @@ -1385,9 +1409,7 @@ srv_suspend_mysql_thread( ulint sec; ulint ms; -#ifdef UNIV_SYNC_DEBUG ut_ad(!mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ trx = thr_get_trx(thr); @@ -1533,9 +1555,7 @@ srv_release_mysql_thread_if_suspended( srv_slot_t* slot; ulint i; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ for (i = 0; i < OS_THREAD_MAX_N; i++) { @@ -1828,15 +1848,15 @@ srv_export_innodb_status(void) export_vars.innodb_row_lock_waits = srv_n_lock_wait_count; export_vars.innodb_row_lock_current_waits = srv_n_lock_wait_current_count; - export_vars.innodb_row_lock_time = srv_n_lock_wait_time / 10000; + export_vars.innodb_row_lock_time = srv_n_lock_wait_time / 1000; if (srv_n_lock_wait_count > 0) { export_vars.innodb_row_lock_time_avg = (ulint) - (srv_n_lock_wait_time / 10000 / srv_n_lock_wait_count); + (srv_n_lock_wait_time / 1000 / srv_n_lock_wait_count); } else { export_vars.innodb_row_lock_time_avg = 0; } export_vars.innodb_row_lock_time_max - = srv_n_lock_max_wait_time / 10000; + = srv_n_lock_max_wait_time / 1000; export_vars.innodb_rows_read = srv_n_rows_read; export_vars.innodb_rows_inserted = srv_n_rows_inserted; export_vars.innodb_rows_updated = srv_n_rows_updated; @@ -1861,6 +1881,7 @@ srv_lock_timeout_and_monitor_thread( double time_elapsed; time_t current_time; time_t last_table_monitor_time; + time_t last_tablespace_monitor_time; time_t last_monitor_time; ibool some_waits; double wait_time; @@ -1873,6 +1894,7 @@ srv_lock_timeout_and_monitor_thread( UT_NOT_USED(arg); srv_last_monitor_time = time(NULL); last_table_monitor_time = time(NULL); + last_tablespace_monitor_time = time(NULL); last_monitor_time = time(NULL); loop: srv_lock_timeout_and_monitor_active = TRUE; @@ -1909,9 +1931,9 @@ loop: } if (srv_print_innodb_tablespace_monitor - && difftime(current_time, last_table_monitor_time) > 60) { - - last_table_monitor_time = time(NULL); + && difftime(current_time, + last_tablespace_monitor_time) > 60) { + last_tablespace_monitor_time = time(NULL); fputs("========================" "========================\n", @@ -2108,7 +2130,7 @@ loop: os_thread_sleep(2000000); - if (srv_shutdown_state < SRV_SHUTDOWN_LAST_PHASE) { + if (srv_shutdown_state < SRV_SHUTDOWN_CLEANUP) { goto loop; } @@ -2278,7 +2300,8 @@ loop: + buf_pool->n_pages_written; if 
(n_pend_ios < 3 && (n_ios - n_ios_old < 5)) { srv_main_thread_op_info = "doing insert buffer merge"; - ibuf_contract_for_n_pages(TRUE, 5); + ibuf_contract_for_n_pages( + TRUE, srv_insert_buffer_batch_size / 4); srv_main_thread_op_info = "flushing log"; @@ -2339,7 +2362,7 @@ loop: even if the server were active */ srv_main_thread_op_info = "doing insert buffer merge"; - ibuf_contract_for_n_pages(TRUE, 5); + ibuf_contract_for_n_pages(TRUE, srv_insert_buffer_batch_size / 4); srv_main_thread_op_info = "flushing log"; log_buffer_flush_to_disk(); @@ -2477,7 +2500,8 @@ background_loop: if (srv_fast_shutdown && srv_shutdown_state > 0) { n_bytes_merged = 0; } else { - n_bytes_merged = ibuf_contract_for_n_pages(TRUE, 20); + n_bytes_merged = ibuf_contract_for_n_pages( + TRUE, srv_insert_buffer_batch_size); } srv_main_thread_op_info = "reserving kernel mutex"; diff --git a/storage/innobase/srv/srv0start.c b/storage/innobase/srv/srv0start.c index 25f6f05e878..dac84e1410d 100644 --- a/storage/innobase/srv/srv0start.c +++ b/storage/innobase/srv/srv0start.c @@ -1025,6 +1025,12 @@ innobase_start_or_create_for_mysql(void) "InnoDB: !!!!!!!! UNIV_DEBUG switched on !!!!!!!!!\n"); #endif +#ifdef UNIV_IBUF_DEBUG + fprintf(stderr, + "InnoDB: !!!!!!!! UNIV_IBUF_DEBUG switched on !!!!!!!!!\n" + "InnoDB: Crash recovery will fail with UNIV_IBUF_DEBUG\n"); +#endif + #ifdef UNIV_SYNC_DEBUG fprintf(stderr, "InnoDB: !!!!!!!! UNIV_SYNC_DEBUG switched on !!!!!!!!!\n"); diff --git a/storage/innobase/sync/Makefile.am b/storage/innobase/sync/Makefile.am deleted file mode 100644 index 7cf274b64e8..00000000000 --- a/storage/innobase/sync/Makefile.am +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (C) 2001, 2003-2004 MySQL AB & Innobase Oy -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
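The srv_export_innodb_status() hunk above changes the divisor from 10000 to 1000: the lock-wait counters are accumulated in microseconds while the exported Innodb_row_lock_time values are reported in milliseconds, so the old code under-reported by a factor of ten. A sketch of the corrected conversion:

typedef unsigned long	ulint;

/* internal counters, accumulated in microseconds */
static ulint	n_lock_wait_time_us;
static ulint	n_lock_wait_count;
static ulint	n_lock_max_wait_time_us;

struct lock_stats {
	ulint	row_lock_time_ms;
	ulint	row_lock_time_avg_ms;
	ulint	row_lock_time_max_ms;
};

static void
export_lock_stats(struct lock_stats* s)
{
	s->row_lock_time_ms = n_lock_wait_time_us / 1000;
	s->row_lock_time_avg_ms = n_lock_wait_count
		? n_lock_wait_time_us / 1000 / n_lock_wait_count
		: 0;
	s->row_lock_time_max_ms = n_lock_max_wait_time_us / 1000;
}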
-# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -include ../include/Makefile.i - -noinst_LIBRARIES = libsync.a - -libsync_a_SOURCES = sync0arr.c sync0rw.c sync0sync.c - -EXTRA_PROGRAMS = - -# Don't update the files from bitkeeper -%::SCCS/s.% diff --git a/storage/innobase/sync/sync0arr.c b/storage/innobase/sync/sync0arr.c index e45cd48a6b4..82b08a890e0 100644 --- a/storage/innobase/sync/sync0arr.c +++ b/storage/innobase/sync/sync0arr.c @@ -670,7 +670,9 @@ sync_array_detect_deadlock( ibool ret; rw_lock_debug_t*debug; - ut_a(arr && start && cell); + ut_a(arr); + ut_a(start); + ut_a(cell); ut_ad(cell->wait_object); ut_ad(os_thread_get_curr_id() == start->thread); ut_ad(depth < 100); diff --git a/storage/innobase/sync/sync0rw.c b/storage/innobase/sync/sync0rw.c index 549ad36271b..4db780c8b3f 100644 --- a/storage/innobase/sync/sync0rw.c +++ b/storage/innobase/sync/sync0rw.c @@ -15,16 +15,34 @@ Created 9/11/1995 Heikki Tuuri #include "mem0mem.h" #include "srv0srv.h" +/* number of system calls made during shared latching */ ulint rw_s_system_call_count = 0; + +/* number of spin waits on rw-latches, +resulted during shared (read) locks */ ulint rw_s_spin_wait_count = 0; + +/* number of OS waits on rw-latches, +resulted during shared (read) locks */ ulint rw_s_os_wait_count = 0; +/* number of unlocks (that unlock shared locks), +set only when UNIV_SYNC_PERF_STAT is defined */ ulint rw_s_exit_count = 0; +/* number of system calls made during exclusive latching */ ulint rw_x_system_call_count = 0; + +/* number of spin waits on rw-latches, +resulted during exclusive (write) locks */ ulint rw_x_spin_wait_count = 0; + +/* number of OS waits on rw-latches, +resulted during exclusive (write) locks */ ulint rw_x_os_wait_count = 0; +/* number of unlocks (that unlock exclusive locks), +set only when UNIV_SYNC_PERF_STAT is defined */ ulint rw_x_exit_count = 0; /* The global list of rw-locks */ @@ -127,7 +145,7 @@ rw_lock_create_func( lock->magic_n = RW_LOCK_MAGIC_N; lock->cfile_name = cfile_name; - lock->cline = cline; + lock->cline = (unsigned int) cline; lock->last_s_file_name = "not yet reserved"; lock->last_x_file_name = "not yet reserved"; @@ -339,9 +357,8 @@ rw_lock_x_lock_low( const char* file_name,/* in: file name where lock requested */ ulint line) /* in: line where requested */ { -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(rw_lock_get_mutex(lock))); -#endif /* UNIV_SYNC_DEBUG */ + if (rw_lock_get_writer(lock) == RW_LOCK_NOT_LOCKED) { if (rw_lock_get_reader_count(lock) == 0) { @@ -356,7 +373,7 @@ rw_lock_x_lock_low( file_name, line); #endif lock->last_x_file_name = file_name; - lock->last_x_line = line; + lock->last_x_line = (unsigned int) line; /* Locking succeeded, we may return */ return(RW_LOCK_EX); @@ -393,7 +410,7 @@ rw_lock_x_lock_low( #endif lock->last_x_file_name = file_name; - lock->last_x_line = line; + lock->last_x_line = (unsigned int) line; /* Locking succeeded, we may return */ return(RW_LOCK_EX); @@ -415,7 +432,7 @@ rw_lock_x_lock_low( #endif lock->last_x_file_name = file_name; - lock->last_x_line = line; + lock->last_x_line = (unsigned int) line; /* Locking succeeded, we may return */ return(RW_LOCK_EX); @@ -564,8 +581,7 @@ rw_lock_debug_mutex_enter(void) /*==========================*/ { loop: - if (0 == mutex_enter_nowait(&rw_lock_debug_mutex, - __FILE__, __LINE__)) { + if (0 == mutex_enter_nowait(&rw_lock_debug_mutex)) { 
return; } @@ -573,8 +589,7 @@ loop: rw_lock_debug_waiters = TRUE; - if (0 == mutex_enter_nowait(&rw_lock_debug_mutex, - __FILE__, __LINE__)) { + if (0 == mutex_enter_nowait(&rw_lock_debug_mutex)) { return; } diff --git a/storage/innobase/sync/sync0sync.c b/storage/innobase/sync/sync0sync.c index c0198469491..bf3f4d1ff20 100644 --- a/storage/innobase/sync/sync0sync.c +++ b/storage/innobase/sync/sync0sync.c @@ -115,6 +115,7 @@ ulint mutex_system_call_count = 0; /* Number of spin waits on mutexes: for performance monitoring */ +/* round=one iteration of a spin loop */ ulint mutex_spin_round_count = 0; ulint mutex_spin_wait_count = 0; ulint mutex_os_wait_count = 0; @@ -311,12 +312,13 @@ mutex_free( } /************************************************************************ -Tries to lock the mutex for the current thread. If the lock is not acquired -immediately, returns with return value 1. */ +NOTE! Use the corresponding macro in the header file, not this function +directly. Tries to lock the mutex for the current thread. If the lock is not +acquired immediately, returns with return value 1. */ ulint -mutex_enter_nowait( -/*===============*/ +mutex_enter_nowait_func( +/*====================*/ /* out: 0 if succeed, 1 if not */ mutex_t* mutex, /* in: pointer to mutex */ const char* file_name __attribute__((unused)), @@ -329,6 +331,7 @@ mutex_enter_nowait( if (!mutex_test_and_set(mutex)) { + ut_d(mutex->thread_id = os_thread_get_curr_id()); #ifdef UNIV_SYNC_DEBUG mutex_set_debug_info(mutex, file_name, line); #endif @@ -346,13 +349,29 @@ Checks that the mutex has been initialized. */ ibool mutex_validate( /*===========*/ - mutex_t* mutex) + const mutex_t* mutex) { ut_a(mutex); ut_a(mutex->magic_n == MUTEX_MAGIC_N); return(TRUE); } + +/********************************************************************** +Checks that the current thread owns the mutex. Works only in the debug +version. */ + +ibool +mutex_own( +/*======*/ + /* out: TRUE if owns */ + const mutex_t* mutex) /* in: mutex */ +{ + ut_ad(mutex_validate(mutex)); + + return(mutex_get_lock_word(mutex) == 1 + && os_thread_eq(mutex->thread_id, os_thread_get_curr_id())); +} #endif /* UNIV_DEBUG */ /********************************************************************** @@ -451,6 +470,7 @@ spin_loop: if (mutex_test_and_set(mutex) == 0) { /* Succeeded! */ + ut_d(mutex->thread_id = os_thread_get_curr_id()); #ifdef UNIV_SYNC_DEBUG mutex_set_debug_info(mutex, file_name, line); #endif @@ -492,6 +512,7 @@ spin_loop: sync_array_free_cell_protected(sync_primary_wait_array, index); + ut_d(mutex->thread_id = os_thread_get_curr_id()); #ifdef UNIV_SYNC_DEBUG mutex_set_debug_info(mutex, file_name, line); #endif @@ -592,7 +613,6 @@ mutex_set_debug_info( mutex->file_name = file_name; mutex->line = line; - mutex->thread_id = os_thread_get_curr_id(); } /********************************************************************** @@ -615,31 +635,6 @@ mutex_get_debug_info( } /********************************************************************** -Checks that the current thread owns the mutex. Works only in the debug -version. */ - -ibool -mutex_own( -/*======*/ - /* out: TRUE if owns */ - mutex_t* mutex) /* in: mutex */ -{ - ut_ad(mutex_validate(mutex)); - - if (mutex_get_lock_word(mutex) != 1) { - - return(FALSE); - } - - if (!os_thread_eq(mutex->thread_id, os_thread_get_curr_id())) { - - return(FALSE); - } - - return(TRUE); -} - -/********************************************************************** Prints debug info of currently reserved mutexes. 
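mutex_own() above moves from UNIV_SYNC_DEBUG builds to plain UNIV_DEBUG builds; the owner thread id is now recorded on every successful acquisition (the new ut_d() lines), so the many ut_ad(mutex_own(...)) assertions un-ifdef'd throughout this patch compile whenever debug assertions do. A standalone sketch of the idea using pthreads; InnoDB uses its own lock word and atomics, this only illustrates the ownership-recording pattern:

#include <pthread.h>

/* Record the owner on every acquisition; compare on assertion.
Only the asking thread's own ownership can be answered reliably,
which is exactly what ut_ad(mutex_own(...)) needs. */
struct dbg_mutex {
	pthread_mutex_t	lock;
	volatile int	locked;	/* lock word */
	pthread_t	owner;	/* valid only while locked */
};

static void
dbg_mutex_enter(struct dbg_mutex* m)
{
	pthread_mutex_lock(&m->lock);
	m->locked = 1;
	m->owner = pthread_self();	/* the ut_d() step in the patch */
}

static int
dbg_mutex_own(const struct dbg_mutex* m)
{
	return(m->locked
	       && pthread_equal(m->owner, pthread_self()));
}

static void
dbg_mutex_exit(struct dbg_mutex* m)
{
	m->locked = 0;
	pthread_mutex_unlock(&m->lock);
}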
*/ static void diff --git a/storage/innobase/thr/Makefile.am b/storage/innobase/thr/Makefile.am deleted file mode 100644 index febcdf3e1a3..00000000000 --- a/storage/innobase/thr/Makefile.am +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (C) 2001, 2003 MySQL AB & Innobase Oy -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -include ../include/Makefile.i - -noinst_LIBRARIES = libthr.a - -libthr_a_SOURCES = thr0loc.c - -EXTRA_PROGRAMS = - -# Don't update the files from bitkeeper -%::SCCS/s.% diff --git a/storage/innobase/thr/thr0loc.c b/storage/innobase/thr/thr0loc.c index f22e909f392..b803bd53101 100644 --- a/storage/innobase/thr/thr0loc.c +++ b/storage/innobase/thr/thr0loc.c @@ -64,9 +64,7 @@ thr_local_get( try_again: ut_ad(thr_local_hash); -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&thr_local_mutex)); -#endif /* UNIV_SYNC_DEBUG */ /* Look for the local struct in the hash table */ diff --git a/storage/innobase/trx/Makefile.am b/storage/innobase/trx/Makefile.am deleted file mode 100644 index f9722454ef5..00000000000 --- a/storage/innobase/trx/Makefile.am +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright (C) 2001, 2003 MySQL AB & Innobase Oy -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -include ../include/Makefile.i - -noinst_LIBRARIES = libtrx.a - -libtrx_a_SOURCES = trx0purge.c trx0rec.c trx0roll.c trx0rseg.c\ - trx0sys.c trx0trx.c trx0undo.c - -EXTRA_PROGRAMS = - -# Don't update the files from bitkeeper -%::SCCS/s.% diff --git a/storage/innobase/trx/trx0purge.c b/storage/innobase/trx/trx0purge.c index 11e089ac90e..f0e85ef1604 100644 --- a/storage/innobase/trx/trx0purge.c +++ b/storage/innobase/trx/trx0purge.c @@ -197,9 +197,7 @@ void trx_purge_sys_create(void) /*======================*/ { -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ purge_sys = mem_alloc(sizeof(trx_purge_t)); @@ -223,7 +221,7 @@ trx_purge_sys_create(void) purge_sys->trx = purge_sys->sess->trx; - purge_sys->trx->type = TRX_PURGE; + purge_sys->trx->is_purge = 1; ut_a(trx_start_low(purge_sys->trx, ULINT_UNDEFINED)); @@ -260,9 +258,8 @@ trx_purge_add_update_undo_to_history( ut_ad(undo); rseg = undo->rseg; -#ifdef UNIV_SYNC_DEBUG + ut_ad(mutex_own(&(rseg->mutex))); -#endif /* UNIV_SYNC_DEBUG */ rseg_header = trx_rsegf_get(rseg->space, rseg->page_no, mtr); @@ -341,9 +338,7 @@ trx_purge_free_segment( /* fputs("Freeing an update undo log segment\n", stderr); */ -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(purge_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ loop: mtr_start(&mtr); mutex_enter(&(rseg->mutex)); @@ -445,9 +440,7 @@ trx_purge_truncate_rseg_history( ulint n_removed_logs = 0; mtr_t mtr; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(purge_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ mtr_start(&mtr); mutex_enter(&(rseg->mutex)); @@ -537,9 +530,7 @@ trx_purge_truncate_history(void) dulint limit_trx_no; dulint limit_undo_no; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(purge_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ trx_purge_arr_get_biggest(purge_sys->arr, &limit_trx_no, &limit_undo_no); @@ -579,9 +570,7 @@ trx_purge_truncate_if_arr_empty(void) /*=================================*/ /* out: TRUE if array empty */ { -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(purge_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ if (purge_sys->arr->n_used == 0) { @@ -610,9 +599,7 @@ trx_purge_rseg_get_next_history_log( ibool del_marks; mtr_t mtr; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(purge_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ mutex_enter(&(rseg->mutex)); @@ -715,9 +702,7 @@ trx_purge_choose_next_log(void) ulint offset = 0; /* remove warning (??? bug ???) 
*/ mtr_t mtr; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(purge_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ ut_ad(purge_sys->next_stored == FALSE); rseg = UT_LIST_GET_FIRST(trx_sys->rseg_list); @@ -818,9 +803,7 @@ trx_purge_get_next_rec( ulint cmpl_info; mtr_t mtr; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(purge_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ ut_ad(purge_sys->next_stored); space = purge_sys->rseg->space; diff --git a/storage/innobase/trx/trx0rec.c b/storage/innobase/trx/trx0rec.c index 69e858fe71d..50f8b011463 100644 --- a/storage/innobase/trx/trx0rec.c +++ b/storage/innobase/trx/trx0rec.c @@ -1024,6 +1024,7 @@ trx_undo_report_row_operation( ibool is_insert; trx_rseg_t* rseg; mtr_t mtr; + ulint err = DB_SUCCESS; mem_heap_t* heap = NULL; ulint offsets_[REC_OFFS_NORMAL_SIZE]; ulint* offsets = offsets_; @@ -1035,7 +1036,7 @@ trx_undo_report_row_operation( *roll_ptr = ut_dulint_zero; - return(DB_SUCCESS); + return(err); } ut_ad(thr); @@ -1053,7 +1054,7 @@ trx_undo_report_row_operation( if (trx->insert_undo == NULL) { - trx_undo_assign_undo(trx, TRX_UNDO_INSERT); + err = trx_undo_assign_undo(trx, TRX_UNDO_INSERT); } undo = trx->insert_undo; @@ -1063,7 +1064,7 @@ trx_undo_report_row_operation( if (trx->update_undo == NULL) { - trx_undo_assign_undo(trx, TRX_UNDO_UPDATE); + err = trx_undo_assign_undo(trx, TRX_UNDO_UPDATE); } @@ -1071,11 +1072,11 @@ trx_undo_report_row_operation( is_insert = FALSE; } - if (undo == NULL) { - /* Did not succeed: out of space */ + if (err != DB_SUCCESS) { + /* Did not succeed: return the error encountered */ mutex_exit(&(trx->undo_mutex)); - return(DB_OUT_OF_FILE_SPACE); + return(err); } page_no = undo->last_page_no; @@ -1107,7 +1108,9 @@ trx_undo_report_row_operation( if (offset == 0) { /* The record did not fit on the page. We erase the end segment of the undo log page and write a log - record of it to to ensure deterministic contents. 
*/ + record of it: this is to ensure that in the debug + version the replicate page constructed using the log + records stays identical to the original page */ trx_undo_erase_page_end(undo_page, &mtr); } @@ -1163,7 +1166,7 @@ trx_undo_report_row_operation( if (UNIV_LIKELY_NULL(heap)) { mem_heap_free(heap); } - return(DB_SUCCESS); + return(err); } /*============== BUILDING PREVIOUS VERSION OF A RECORD ===============*/ diff --git a/storage/innobase/trx/trx0roll.c b/storage/innobase/trx/trx0roll.c index 201d1be3656..91dcf035f96 100644 --- a/storage/innobase/trx/trx0roll.c +++ b/storage/innobase/trx/trx0roll.c @@ -785,10 +785,8 @@ trx_roll_try_truncate( dulint limit; dulint biggest; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(trx->undo_mutex))); ut_ad(mutex_own(&((trx->rseg)->mutex))); -#endif /* UNIV_SYNC_DEBUG */ trx->pages_undone = 0; @@ -831,9 +829,7 @@ trx_roll_pop_top_rec( trx_undo_rec_t* prev_rec; page_t* prev_rec_page; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(trx->undo_mutex))); -#endif /* UNIV_SYNC_DEBUG */ undo_page = trx_undo_page_get_s_latched(undo->space, undo->top_page_no, mtr); @@ -1060,9 +1056,7 @@ trx_rollback( que_thr_t* thr; /* que_thr_t* thr2; */ -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ ut_ad((trx->undo_no_arr == NULL) || ((trx->undo_no_arr)->n_used == 0)); /* Initialize the rollback field in the transaction */ @@ -1131,9 +1125,7 @@ trx_roll_graph_build( que_thr_t* thr; /* que_thr_t* thr2; */ -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ heap = mem_heap_create(512); fork = que_fork_create(NULL, NULL, QUE_FORK_ROLLBACK, heap); @@ -1160,9 +1152,7 @@ trx_finish_error_processing( trx_sig_t* sig; trx_sig_t* next_sig; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ sig = UT_LIST_GET_FIRST(trx->signals); @@ -1195,9 +1185,7 @@ trx_finish_partial_rollback_off_kernel( { trx_sig_t* sig; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ sig = UT_LIST_GET_FIRST(trx->signals); @@ -1228,9 +1216,7 @@ trx_finish_rollback_off_kernel( trx_sig_t* sig; trx_sig_t* next_sig; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ ut_a(trx->undo_no_arr == NULL || trx->undo_no_arr->n_used == 0); diff --git a/storage/innobase/trx/trx0rseg.c b/storage/innobase/trx/trx0rseg.c index 7a6989c7b4f..020f217c90b 100644 --- a/storage/innobase/trx/trx0rseg.c +++ b/storage/innobase/trx/trx0rseg.c @@ -60,9 +60,7 @@ trx_rseg_header_create( page_t* page; ut_ad(mtr); -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ ut_ad(mtr_memo_contains(mtr, fil_space_get_latch(space), MTR_MEMO_X_LOCK)); sys_header = trx_sysf_get(mtr); @@ -138,9 +136,7 @@ trx_rseg_mem_create( ulint sum_of_undo_sizes; ulint len; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ rseg = mem_alloc(sizeof(trx_rseg_t)); diff --git a/storage/innobase/trx/trx0sys.c b/storage/innobase/trx/trx0sys.c index b87f3d5e090..40348dd4199 100644 --- a/storage/innobase/trx/trx0sys.c +++ b/storage/innobase/trx/trx0sys.c @@ -547,9 +547,7 @@ trx_in_trx_list( { trx_t* trx; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(kernel_mutex))); -#endif /* UNIV_SYNC_DEBUG */ trx = UT_LIST_GET_FIRST(trx_sys->trx_list); @@ -576,9 +574,7 @@ trx_sys_flush_max_trx_id(void) trx_sysf_t* sys_header; mtr_t mtr; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ mtr_start(&mtr); @@ -650,6 
+646,7 @@ trx_sys_update_mysql_binlog_offset( MLOG_4BYTES, mtr); } +#ifdef UNIV_HOTBACKUP /********************************************************************* Prints to stderr the MySQL binlog info in the system header if the magic number shows it valid. */ @@ -681,6 +678,7 @@ trx_sys_print_mysql_binlog_offset_from_page( + TRX_SYS_MYSQL_LOG_NAME); } } +#endif /* UNIV_HOTBACKUP */ /********************************************************************* Stores the MySQL binlog offset info in the trx system header if @@ -799,9 +797,7 @@ trx_sysf_rseg_find_free( ulint page_no; ulint i; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(kernel_mutex))); -#endif /* UNIV_SYNC_DEBUG */ sys_header = trx_sysf_get(mtr); @@ -874,7 +870,16 @@ trx_sysf_create( trx_sysf_rseg_set_page_no(sys_header, i, FIL_NULL, mtr); } - /* The remaining area (up to the page trailer) is uninitialized. */ + /* The remaining area (up to the page trailer) is uninitialized. + Silence Valgrind warnings about it. */ + UNIV_MEM_VALID(sys_header + (TRX_SYS_RSEGS + + TRX_SYS_N_RSEGS * TRX_SYS_RSEG_SLOT_SIZE + + TRX_SYS_RSEG_SPACE), + (UNIV_PAGE_SIZE - FIL_PAGE_DATA_END + - (TRX_SYS_RSEGS + + TRX_SYS_N_RSEGS * TRX_SYS_RSEG_SLOT_SIZE + + TRX_SYS_RSEG_SPACE)) + + page - sys_header); /* Create the first rollback segment in the SYSTEM tablespace */ page_no = trx_rseg_header_create(TRX_SYS_SPACE, ULINT_MAX, &slot_no, diff --git a/storage/innobase/trx/trx0trx.c b/storage/innobase/trx/trx0trx.c index cfa2b01f406..a278ad51984 100644 --- a/storage/innobase/trx/trx0trx.c +++ b/storage/innobase/trx/trx0trx.c @@ -25,6 +25,7 @@ Created 3/26/1996 Heikki Tuuri #include "btr0sea.h" #include "os0proc.h" #include "trx0xa.h" +#include "ha_prototypes.h" /* Copy of the prototype for innobase_mysql_print_thd: this copy MUST be equal to the one in mysql/sql/ha_innodb.cc ! 
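Related note on the trx_sysf_create hunk above: UNIV_MEM_VALID marks the intentionally uninitialized tail of the TRX_SYS page as defined so that Valgrind does not complain when the page is later written out. In instrumented builds the macro reduces to a memcheck client request; a minimal sketch of the idea, assuming the usual valgrind headers (the real definition sits centrally in the InnoDB headers, and the guard name below is a simplified stand-in):

	#include <valgrind/memcheck.h>

	#ifdef UNIV_DEBUG_VALGRIND
	# define UNIV_MEM_VALID(addr, size) \
		VALGRIND_MAKE_MEM_DEFINED(addr, size)
	#else
	# define UNIV_MEM_VALID(addr, size) do {} while (0)
	#endif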
*/ @@ -101,9 +102,7 @@ trx_create( { trx_t* trx; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ trx = mem_alloc(sizeof(trx_t)); @@ -111,7 +110,7 @@ trx_create( trx->op_info = ""; - trx->type = TRX_USER; + trx->is_purge = 0; trx->conc_state = TRX_NOT_STARTED; trx->start_time = time(NULL); @@ -132,17 +131,14 @@ trx_create( trx->mysql_thd = NULL; trx->mysql_query_str = NULL; + trx->active_trans = 0; + trx->duplicates = 0; trx->n_mysql_tables_in_use = 0; trx->mysql_n_tables_locked = 0; trx->mysql_log_file_name = NULL; trx->mysql_log_offset = 0; - trx->mysql_master_log_file_name = ""; - trx->mysql_master_log_pos = 0; - - trx->repl_wait_binlog_name = NULL; - trx->repl_wait_binlog_pos = 0; mutex_create(&trx->undo_mutex, SYNC_TRX_UNDO); @@ -194,6 +190,8 @@ trx_create( memset(&trx->xid, 0, sizeof(trx->xid)); trx->xid.formatID = -1; + trx->n_autoinc_rows = 0; + trx_reset_new_rec_lock_info(trx); return(trx); @@ -280,9 +278,7 @@ trx_free( /*=====*/ trx_t* trx) /* in, own: trx object */ { -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ if (trx->declared_to_be_inside_innodb) { ut_print_timestamp(stderr); @@ -324,11 +320,6 @@ trx_free( trx_undo_arr_free(trx->undo_no_arr); } - if (trx->repl_wait_binlog_name != NULL) { - - mem_free(trx->repl_wait_binlog_name); - } - ut_a(UT_LIST_GET_LEN(trx->signals) == 0); ut_a(UT_LIST_GET_LEN(trx->reply_signals) == 0); @@ -406,9 +397,7 @@ trx_list_insert_ordered( { trx_t* trx2; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ trx2 = UT_LIST_GET_FIRST(trx_sys->trx_list); @@ -633,9 +622,7 @@ trx_assign_rseg(void) { trx_rseg_t* rseg = trx_sys->latest_rseg; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ loop: /* Get next rseg in a round-robin fashion */ @@ -672,12 +659,10 @@ trx_start_low( { trx_rseg_t* rseg; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ ut_ad(trx->rseg == NULL); - if (trx->type == TRX_PURGE) { + if (trx->is_purge) { trx->id = ut_dulint_zero; trx->conc_state = TRX_ACTIVE; trx->start_time = time(NULL); @@ -749,9 +734,7 @@ trx_commit_off_kernel( ibool must_flush_log = FALSE; mtr_t mtr; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ trx->must_flush_log_later = FALSE; @@ -816,14 +799,6 @@ trx_commit_off_kernel( trx->mysql_log_file_name = NULL; } - if (trx->mysql_master_log_file_name[0] != '\0') { - /* This database server is a MySQL replication slave */ - trx_sys_update_mysql_binlog_offset( - trx->mysql_master_log_file_name, - trx->mysql_master_log_pos, - TRX_SYS_MYSQL_MASTER_LOG_INFO, &mtr); - } - /* The following call commits the mini-transaction, making the whole transaction committed in the file-based world, at this log sequence number. The transaction becomes 'durable' when @@ -851,9 +826,7 @@ trx_commit_off_kernel( ut_ad(trx->conc_state == TRX_ACTIVE || trx->conc_state == TRX_PREPARED); -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ /* The following assignment makes the transaction committed in memory and makes its changes to data visible to other transactions. 
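A note on a pattern recurring in nearly every hunk of these trx/sync files: the #ifdef UNIV_SYNC_DEBUG guards around ut_ad(mutex_own(...)) are dropped because mutex_own() is now compiled under plain UNIV_DEBUG, and every acquisition path records the owner with ut_d(mutex->thread_id = os_thread_get_curr_id()). The stand-alone sketch below mirrors that ownership check with pthreads; mutex_t, mutex_enter() and friends here are simplified stand-ins, not the real InnoDB definitions:

	#include <assert.h>
	#include <pthread.h>

	typedef struct {
		pthread_mutex_t	lock;
		volatile int	lock_word;	/* 1 while held, 0 otherwise */
		pthread_t	thread_id;	/* valid only while lock_word == 1 */
	} mutex_t;

	static int
	mutex_own(const mutex_t* m)
	{
		/* Same shape as the relocated InnoDB function: the lock
		word is set AND the recorded owner is the caller. */
		return(m->lock_word == 1
		       && pthread_equal(m->thread_id, pthread_self()));
	}

	static void
	mutex_enter(mutex_t* m)
	{
		pthread_mutex_lock(&m->lock);
		m->thread_id = pthread_self();	/* every lock path sets this */
		m->lock_word = 1;
	}

	static void
	mutex_exit(mutex_t* m)
	{
		m->lock_word = 0;
		pthread_mutex_unlock(&m->lock);
	}

	int
	main(void)
	{
		mutex_t	m = { PTHREAD_MUTEX_INITIALIZER, 0 };

		mutex_enter(&m);
		assert(mutex_own(&m));	/* the bare ut_ad(mutex_own(...)) idiom */
		mutex_exit(&m);
		return(0);
	}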
@@ -1036,9 +1009,7 @@ trx_handle_commit_sig_off_kernel( trx_sig_t* sig; trx_sig_t* next_sig; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ trx->que_state = TRX_QUE_COMMITTING; @@ -1078,9 +1049,7 @@ trx_end_lock_wait( { que_thr_t* thr; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ ut_ad(trx->que_state == TRX_QUE_LOCK_WAIT); thr = UT_LIST_GET_FIRST(trx->wait_thrs); @@ -1107,9 +1076,7 @@ trx_lock_wait_to_suspended( { que_thr_t* thr; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ ut_ad(trx->que_state == TRX_QUE_LOCK_WAIT); thr = UT_LIST_GET_FIRST(trx->wait_thrs); @@ -1137,9 +1104,7 @@ trx_sig_reply_wait_to_suspended( trx_sig_t* sig; que_thr_t* thr; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ sig = UT_LIST_GET_FIRST(trx->reply_signals); @@ -1172,9 +1137,7 @@ trx_sig_is_compatible( { trx_sig_t* sig; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ if (UT_LIST_GET_LEN(trx->signals) == 0) { @@ -1260,9 +1223,7 @@ trx_sig_send( trx_t* receiver_trx; ut_ad(trx); -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ if (!trx_sig_is_compatible(trx, type, sender)) { /* The signal is not compatible with the other signals in @@ -1288,7 +1249,6 @@ trx_sig_send( UT_LIST_ADD_LAST(signals, trx->signals, sig); sig->type = type; - sig->state = TRX_SIG_WAITING; sig->sender = sender; sig->receiver = receiver_thr; @@ -1332,9 +1292,7 @@ trx_end_signal_handling( /*====================*/ trx_t* trx) /* in: trx */ { -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ ut_ad(trx->handling_signals == TRUE); trx->handling_signals = FALSE; @@ -1368,9 +1326,7 @@ loop: we can process immediately */ ut_ad(trx); -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ if (trx->handling_signals && (UT_LIST_GET_LEN(trx->signals) == 0)) { @@ -1471,9 +1427,7 @@ trx_sig_reply( trx_t* receiver_trx; ut_ad(sig); -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ if (sig->receiver != NULL) { ut_ad((sig->receiver)->state == QUE_THR_SIG_REPLY_WAIT); @@ -1501,9 +1455,7 @@ trx_sig_remove( trx_sig_t* sig) /* in, own: signal */ { ut_ad(trx && sig); -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ ut_ad(sig->receiver == NULL); @@ -1602,19 +1554,21 @@ trx_commit_for_mysql( the transaction object does not have an InnoDB session object, and we set the dummy session that we use for all MySQL transactions. */ - mutex_enter(&kernel_mutex); - if (trx->sess == NULL) { /* Open a dummy session */ if (!trx_dummy_sess) { - trx_dummy_sess = sess_open(); + mutex_enter(&kernel_mutex); + + if (!trx_dummy_sess) { + trx_dummy_sess = sess_open(); + } + + mutex_exit(&kernel_mutex); } trx->sess = trx_dummy_sess; } - - mutex_exit(&kernel_mutex); trx_start_if_not_started(trx); @@ -1743,7 +1697,7 @@ trx_print( fputs(trx->op_info, f); } - if (trx->type != TRX_USER) { + if (trx->is_purge) { fputs(" purge trx", f); } @@ -1806,6 +1760,61 @@ trx_print( } } +/*********************************************************************** +Compares the "weight" (or size) of two transactions. The weight of one +transaction is estimated as the number of altered rows + the number of +locked rows. Transactions that have edited non-transactional tables are +considered heavier than ones that have not. 
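Put differently, the non-transactional flag is the high-order sort key and the row weight only breaks ties. A minimal illustration of the fallback comparison using plain integers and hypothetical figures (the real code compares dulint values via ut_dulint_cmp):

	/* weight = rows altered (undo_no) + rows locked (trx_locks) */
	long long	weight_a = 100 + 3;	/* 100 undo entries, 3 row locks */
	long long	weight_b = 4 + 120;	/* 4 undo entries, 120 row locks */
	/* weight_a (103) < weight_b (124): a is the lighter transaction,
	so a victim search based on this ordering would prefer a */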
*/ + +int +trx_weight_cmp( +/*===========*/ + /* out: <0, 0 or >0; similar to strcmp(3) */ + trx_t* a, /* in: the first transaction to be compared */ + trx_t* b) /* in: the second transaction to be compared */ +{ + ibool a_notrans_edit; + ibool b_notrans_edit; + + /* If mysql_thd is NULL for a transaction we assume that it has + not edited non-transactional tables. */ + + a_notrans_edit = a->mysql_thd != NULL + && thd_has_edited_nontrans_tables(a->mysql_thd); + + b_notrans_edit = b->mysql_thd != NULL + && thd_has_edited_nontrans_tables(b->mysql_thd); + + if (a_notrans_edit && !b_notrans_edit) { + + return(1); + } + + if (!a_notrans_edit && b_notrans_edit) { + + return(-1); + } + + /* Either both had edited non-transactional tables or both had + not, we fall back to comparing the number of altered/locked + rows. */ + +#if 0 + fprintf(stderr, + "%s TRX_WEIGHT(a): %lld+%lu, TRX_WEIGHT(b): %lld+%lu\n", + __func__, + ut_conv_dulint_to_longlong(a->undo_no), + UT_LIST_GET_LEN(a->trx_locks), + ut_conv_dulint_to_longlong(b->undo_no), + UT_LIST_GET_LEN(b->trx_locks)); +#endif + +#define TRX_WEIGHT(t) \ + ut_dulint_add((t)->undo_no, UT_LIST_GET_LEN((t)->trx_locks)) + + return(ut_dulint_cmp(TRX_WEIGHT(a), TRX_WEIGHT(b))); +} + /******************************************************************** Prepares a transaction. */ @@ -1820,9 +1829,7 @@ trx_prepare_off_kernel( dulint lsn; mtr_t mtr; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ rseg = trx->rseg; @@ -1868,9 +1875,7 @@ trx_prepare_off_kernel( mutex_enter(&kernel_mutex); } -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ /*--------------------------------------*/ trx->conc_state = TRX_PREPARED; @@ -1928,7 +1933,7 @@ Does the transaction prepare for MySQL. */ ulint trx_prepare_for_mysql( -/*====-=============*/ +/*==================*/ /* out: 0 or error number */ trx_t* trx) /* in: trx handle */ { @@ -2023,7 +2028,7 @@ trx_recover_for_mysql( (ulong) count); } - return (count); + return ((int) count); } /*********************************************************************** diff --git a/storage/innobase/trx/trx0undo.c b/storage/innobase/trx/trx0undo.c index fbcfab38f01..64e5ad3c9a8 100644 --- a/storage/innobase/trx/trx0undo.c +++ b/storage/innobase/trx/trx0undo.c @@ -373,31 +373,34 @@ trx_undo_page_init( /******************************************************************* Creates a new undo log segment in file. */ static -page_t* +ulint trx_undo_seg_create( /*================*/ - /* out: segment header page x-latched, NULL - if no space left */ + /* out: DB_SUCCESS if page creation OK + possible error codes are: + DB_TOO_MANY_CONCURRENT_TRXS + DB_OUT_OF_FILE_SPACE */ trx_rseg_t* rseg __attribute__((unused)),/* in: rollback segment */ trx_rsegf_t* rseg_hdr,/* in: rollback segment header, page x-latched */ ulint type, /* in: type of the segment: TRX_UNDO_INSERT or TRX_UNDO_UPDATE */ ulint* id, /* out: slot index within rseg header */ + page_t** undo_page, + /* out: segment header page x-latched, NULL + if there was an error */ mtr_t* mtr) /* in: mtr */ { ulint slot_no; ulint space; - page_t* undo_page; trx_upagef_t* page_hdr; trx_usegf_t* seg_hdr; ulint n_reserved; ibool success; + ulint err = DB_SUCCESS; ut_ad(mtr && id && rseg_hdr); -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(rseg->mutex))); -#endif /* UNIV_SYNC_DEBUG */ /* fputs(type == TRX_UNDO_INSERT ? 
"Creating insert undo log segment\n" @@ -412,7 +415,7 @@ trx_undo_seg_create( "InnoDB: many active transactions" " running concurrently?\n"); - return(NULL); + return(DB_TOO_MANY_CONCURRENT_TRXS); } space = buf_frame_get_space_id(rseg_hdr); @@ -421,30 +424,30 @@ trx_undo_seg_create( mtr); if (!success) { - return(NULL); + return(DB_OUT_OF_FILE_SPACE); } /* Allocate a new file segment for the undo log */ - undo_page = fseg_create_general(space, 0, + *undo_page = fseg_create_general(space, 0, TRX_UNDO_SEG_HDR + TRX_UNDO_FSEG_HEADER, TRUE, mtr); fil_space_release_free_extents(space, n_reserved); - if (undo_page == NULL) { + if (*undo_page == NULL) { /* No space left */ - return(NULL); + return(DB_OUT_OF_FILE_SPACE); } #ifdef UNIV_SYNC_DEBUG - buf_page_dbg_add_level(undo_page, SYNC_TRX_UNDO_PAGE); + buf_page_dbg_add_level(*undo_page, SYNC_TRX_UNDO_PAGE); #endif /* UNIV_SYNC_DEBUG */ - page_hdr = undo_page + TRX_UNDO_PAGE_HDR; - seg_hdr = undo_page + TRX_UNDO_SEG_HDR; + page_hdr = *undo_page + TRX_UNDO_PAGE_HDR; + seg_hdr = *undo_page + TRX_UNDO_SEG_HDR; - trx_undo_page_init(undo_page, type, mtr); + trx_undo_page_init(*undo_page, type, mtr); mlog_write_ulint(page_hdr + TRX_UNDO_PAGE_FREE, TRX_UNDO_SEG_HDR + TRX_UNDO_SEG_HDR_SIZE, @@ -458,10 +461,11 @@ trx_undo_seg_create( page_hdr + TRX_UNDO_PAGE_NODE, mtr); trx_rsegf_set_nth_undo(rseg_hdr, slot_no, - buf_frame_get_page_no(undo_page), mtr); + buf_frame_get_page_no(*undo_page), mtr); + *id = slot_no; - return(undo_page); + return(err); } /************************************************************************** @@ -836,11 +840,9 @@ trx_undo_add_page( ulint n_reserved; ibool success; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(trx->undo_mutex))); ut_ad(!mutex_own(&kernel_mutex)); ut_ad(mutex_own(&(trx->rseg->mutex))); -#endif /* UNIV_SYNC_DEBUG */ rseg = trx->rseg; @@ -911,10 +913,8 @@ trx_undo_free_page( ulint hist_size; ut_a(hdr_page_no != page_no); -#ifdef UNIV_SYNC_DEBUG ut_ad(!mutex_own(&kernel_mutex)); ut_ad(mutex_own(&(rseg->mutex))); -#endif /* UNIV_SYNC_DEBUG */ undo_page = trx_undo_page_get(space, page_no, mtr); @@ -961,9 +961,7 @@ trx_undo_free_page_in_rollback( ulint last_page_no; ut_ad(undo->hdr_page_no != page_no); -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(trx->undo_mutex))); -#endif /* UNIV_SYNC_DEBUG */ last_page_no = trx_undo_free_page(undo->rseg, FALSE, undo->space, undo->hdr_page_no, page_no, mtr); @@ -1016,10 +1014,8 @@ trx_undo_truncate_end( trx_rseg_t* rseg; mtr_t mtr; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(trx->undo_mutex))); ut_ad(mutex_own(&(trx->rseg->mutex))); -#endif /* UNIV_SYNC_DEBUG */ rseg = trx->rseg; @@ -1096,9 +1092,7 @@ trx_undo_truncate_start( ulint page_no; mtr_t mtr; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(rseg->mutex))); -#endif /* UNIV_SYNC_DEBUG */ if (0 == ut_dulint_cmp(limit, ut_dulint_zero)) { @@ -1164,9 +1158,9 @@ trx_undo_seg_free( while (!finished) { mtr_start(&mtr); -#ifdef UNIV_SYNC_DEBUG + ut_ad(!mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ + mutex_enter(&(rseg->mutex)); seg_header = trx_undo_page_get(undo->space, undo->hdr_page_no, @@ -1389,9 +1383,7 @@ trx_undo_mem_create( { trx_undo_t* undo; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(rseg->mutex))); -#endif /* UNIV_SYNC_DEBUG */ if (id >= TRX_RSEG_N_SLOTS) { fprintf(stderr, @@ -1401,6 +1393,11 @@ trx_undo_mem_create( undo = mem_alloc(sizeof(trx_undo_t)); + if (undo == NULL) { + + return NULL; + } + undo->id = id; undo->type = type; undo->state = TRX_UNDO_ACTIVE; @@ -1437,11 +1434,9 @@ trx_undo_mem_init_for_reuse( XID* xid, /* 
in: X/Open XA transaction identification*/ ulint offset) /* in: undo log header byte offset on page */ { -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&((undo->rseg)->mutex))); -#endif /* UNIV_SYNC_DEBUG */ - if (undo->id >= TRX_RSEG_N_SLOTS) { + if (UNIV_UNLIKELY(undo->id >= TRX_RSEG_N_SLOTS)) { fprintf(stderr, "InnoDB: Error: undo->id is %lu\n", (ulong) undo->id); @@ -1480,11 +1475,15 @@ trx_undo_mem_free( /************************************************************************** Creates a new undo log. */ static -trx_undo_t* +ulint trx_undo_create( /*============*/ - /* out: undo log object, NULL if did not - succeed: out of space */ + /* out: DB_SUCCESS if successful in creating + the new undo log object, possible error + codes are: + DB_TOO_MANY_CONCURRENT_TRXS + DB_OUT_OF_FILE_SPACE + DB_OUT_OF_MEMORY */ trx_t* trx, /* in: transaction */ trx_rseg_t* rseg, /* in: rollback segment memory copy */ ulint type, /* in: type of the log: TRX_UNDO_INSERT or @@ -1492,36 +1491,37 @@ trx_undo_create( dulint trx_id, /* in: id of the trx for which the undo log is created */ XID* xid, /* in: X/Open transaction identification*/ + trx_undo_t** undo, /* out: the new undo log object, undefined + if it did not succeed */ mtr_t* mtr) /* in: mtr */ { trx_rsegf_t* rseg_header; ulint page_no; ulint offset; ulint id; - trx_undo_t* undo; page_t* undo_page; + ulint err; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(rseg->mutex))); -#endif /* UNIV_SYNC_DEBUG */ if (rseg->curr_size == rseg->max_size) { - return(NULL); + return(DB_OUT_OF_FILE_SPACE); } rseg->curr_size++; rseg_header = trx_rsegf_get(rseg->space, rseg->page_no, mtr); - undo_page = trx_undo_seg_create(rseg, rseg_header, type, &id, mtr); + err = trx_undo_seg_create(rseg, rseg_header, type, &id, + &undo_page, mtr); - if (undo_page == NULL) { + if (err != DB_SUCCESS) { /* Did not succeed */ rseg->curr_size--; - return(NULL); + return(err); } page_no = buf_frame_get_page_no(undo_page); @@ -1533,9 +1533,14 @@ trx_undo_create( undo_page + offset, mtr); } - undo = trx_undo_mem_create(rseg, id, type, trx_id, xid, + *undo = trx_undo_mem_create(rseg, id, type, trx_id, xid, page_no, offset); - return(undo); + if (*undo == NULL) { + + err = DB_OUT_OF_MEMORY; + } + + return(err); } /*================ UNDO LOG ASSIGNMENT AND CLEANUP =====================*/ @@ -1561,9 +1566,7 @@ trx_undo_reuse_cached( page_t* undo_page; ulint offset; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(rseg->mutex))); -#endif /* UNIV_SYNC_DEBUG */ if (type == TRX_UNDO_INSERT) { @@ -1654,46 +1657,42 @@ trx_undo_mark_as_dict_operation( Assigns an undo log for a transaction. A new undo log is created or a cached undo log reused.
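With this change the function reports failure through a DB_* error code and hands the assigned log back in trx->insert_undo or trx->update_undo, instead of signalling failure with a NULL return. A sketch of the resulting caller pattern, following the shape trx_undo_report_row_operation uses above (reduced to the error path):

	ulint	err;

	err = trx_undo_assign_undo(trx, TRX_UNDO_INSERT);

	if (err != DB_SUCCESS) {
		/* Propagate the precise cause, for example
		DB_TOO_MANY_CONCURRENT_TRXS or DB_OUT_OF_MEMORY,
		instead of the old catch-all DB_OUT_OF_FILE_SPACE. */
		mutex_exit(&(trx->undo_mutex));

		return(err);
	}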
*/ -trx_undo_t* +ulint trx_undo_assign_undo( /*=================*/ - /* out: the undo log, NULL if did not succeed: out of - space */ - trx_t* trx, /* in: transaction */ - ulint type) /* in: TRX_UNDO_INSERT or TRX_UNDO_UPDATE */ + /* out: DB_SUCCESS if undo log assign + successful, possible error codes are: + DB_TOO_MANY_CONCURRENT_TRXS + DB_OUT_OF_FILE_SPACE, DB_OUT_OF_MEMORY */ + trx_t* trx, /* in: transaction */ + ulint type) /* in: TRX_UNDO_INSERT or TRX_UNDO_UPDATE */ { trx_rseg_t* rseg; trx_undo_t* undo; mtr_t mtr; + ulint err = DB_SUCCESS; ut_ad(trx); ut_ad(trx->rseg); rseg = trx->rseg; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(trx->undo_mutex))); -#endif /* UNIV_SYNC_DEBUG */ mtr_start(&mtr); -#ifdef UNIV_SYNC_DEBUG ut_ad(!mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ + mutex_enter(&(rseg->mutex)); undo = trx_undo_reuse_cached(trx, rseg, type, trx->id, &trx->xid, &mtr); if (undo == NULL) { - undo = trx_undo_create(trx, rseg, type, trx->id, &trx->xid, - &mtr); - if (undo == NULL) { - /* Did not succeed */ - - mutex_exit(&(rseg->mutex)); - mtr_commit(&mtr); + err = trx_undo_create(trx, rseg, type, trx->id, &trx->xid, + &undo, &mtr); + if (err != DB_SUCCESS) { - return(NULL); + goto func_exit; } } @@ -1711,10 +1710,11 @@ trx_undo_assign_undo( trx_undo_mark_as_dict_operation(trx, undo, &mtr); } +func_exit: mutex_exit(&(rseg->mutex)); mtr_commit(&mtr); - return(undo); + return(err); } /********************************************************************** @@ -1836,9 +1836,8 @@ trx_undo_update_cleanup( undo = trx->update_undo; rseg = trx->rseg; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&(rseg->mutex))); -#endif /* UNIV_SYNC_DEBUG */ + trx_purge_add_update_undo_to_history(trx, undo_page, mtr); UT_LIST_REMOVE(undo_list, rseg->update_undo_list, undo); diff --git a/storage/innobase/usr/Makefile.am b/storage/innobase/usr/Makefile.am deleted file mode 100644 index ea485022f71..00000000000 --- a/storage/innobase/usr/Makefile.am +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (C) 2001, 2003 MySQL AB & Innobase Oy -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details.
-# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -include ../include/Makefile.i - -noinst_LIBRARIES = libusr.a - -libusr_a_SOURCES = usr0sess.c - -EXTRA_PROGRAMS = - -# Don't update the files from bitkeeper -%::SCCS/s.% diff --git a/storage/innobase/usr/usr0sess.c b/storage/innobase/usr/usr0sess.c index ca97ea23e95..3740c05eaab 100644 --- a/storage/innobase/usr/usr0sess.c +++ b/storage/innobase/usr/usr0sess.c @@ -32,9 +32,8 @@ sess_open(void) { sess_t* sess; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ + sess = mem_alloc(sizeof(sess_t)); sess->state = SESS_ACTIVE; @@ -54,9 +53,7 @@ sess_close( /*=======*/ sess_t* sess) /* in, own: session object */ { -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ ut_ad(sess->trx == NULL); mem_free(sess); @@ -72,9 +69,8 @@ sess_try_close( /* out: TRUE if closed */ sess_t* sess) /* in, own: session object */ { -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&kernel_mutex)); -#endif /* UNIV_SYNC_DEBUG */ + if (UT_LIST_GET_LEN(sess->graphs) == 0) { sess_close(sess); diff --git a/storage/innobase/ut/Makefile.am b/storage/innobase/ut/Makefile.am deleted file mode 100644 index d79184759c1..00000000000 --- a/storage/innobase/ut/Makefile.am +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (C) 2001, 2003 MySQL AB & Innobase Oy -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -include ../include/Makefile.i - -noinst_LIBRARIES = libut.a - -libut_a_SOURCES = ut0byte.c ut0dbg.c ut0mem.c ut0rnd.c ut0ut.c ut0vec.c ut0list.c ut0wqueue.c - -EXTRA_PROGRAMS = - -# Don't update the files from bitkeeper -%::SCCS/s.% diff --git a/storage/innobase/ut/ut0mem.c b/storage/innobase/ut/ut0mem.c index 4fd515c35e6..b466a5f6872 100644 --- a/storage/innobase/ut/ut0mem.c +++ b/storage/innobase/ut/ut0mem.c @@ -162,6 +162,8 @@ retry: #endif } + UNIV_MEM_ALLOC(ret, n + sizeof(ut_mem_block_t)); + ((ut_mem_block_t*)ret)->size = n + sizeof(ut_mem_block_t); ((ut_mem_block_t*)ret)->magic_n = UT_MEM_MAGIC_N; diff --git a/storage/innobase/ut/ut0ut.c b/storage/innobase/ut/ut0ut.c index d805cc3b4b2..389063ad821 100644 --- a/storage/innobase/ut/ut0ut.c +++ b/storage/innobase/ut/ut0ut.c @@ -14,12 +14,62 @@ Created 5/11/1994 Heikki Tuuri #include <stdarg.h> #include <string.h> +#include <ctype.h> #include "ut0sort.h" #include "trx0trx.h" ibool ut_always_false = FALSE; +#ifdef __WIN__ +/********************************************************************* +NOTE: The Windows epoch starts from 1601/01/01 whereas the Unix +epoch starts from 1970/1/1. 
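The gap between the two epochs is 369 years, of which 89 are leap years, giving (369 * 365 + 89) * 86400 = 11644473600 seconds, i.e. 11644473600000000 microseconds. A stand-alone self-check of that arithmetic (a hypothetical snippet, not part of the source; assumes a compiler with long long):

	#include <assert.h>

	int
	main(void)
	{
		long long	days = 369LL * 365 + 89;	/* 1601..1969 */

		assert(days * 86400 * 1000000 == 11644473600000000LL);
		return(0);
	}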
For selection of constant see: +http://support.microsoft.com/kb/167296/ */ +#define WIN_TO_UNIX_DELTA_USEC ((ib_longlong) 11644473600000000ULL) + + +/********************************************************************* +This is the Windows version of gettimeofday(2).*/ +static +int +ut_gettimeofday( +/*============*/ + /* out: 0 if all OK else -1 */ + struct timeval* tv, /* out: Values are relative to Unix epoch */ + void* tz) /* in: not used */ +{ + FILETIME ft; + ib_longlong tm; + + if (!tv) { + errno = EINVAL; + return(-1); + } + + GetSystemTimeAsFileTime(&ft); + + tm = (ib_longlong) ft.dwHighDateTime << 32; + tm |= ft.dwLowDateTime; + + ut_a(tm >= 0); /* If tm wraps over to negative, the quotient / 10 + does not work */ + + tm /= 10; /* Convert from 100 nsec periods to usec */ + + /* If we don't convert to the Unix epoch the value for + struct timeval::tv_sec will overflow.*/ + tm -= WIN_TO_UNIX_DELTA_USEC; + + tv->tv_sec = (long) (tm / 1000000L); + tv->tv_usec = (long) (tm % 1000000L); + + return(0); +} +#else +#define ut_gettimeofday gettimeofday +#endif + #ifndef UNIV_HOTBACKUP /********************************************************************* Display an SQL identifier. @@ -85,17 +135,11 @@ ut_usectime( ulint* sec, /* out: seconds since the Epoch */ ulint* ms) /* out: microseconds since the Epoch+*sec */ { -#ifdef __WIN__ - SYSTEMTIME st; - GetLocalTime(&st); - *sec = (ulint) st.wSecond; - *ms = (ulint) st.wMilliseconds; -#else struct timeval tv; - gettimeofday(&tv,NULL); + + ut_gettimeofday(&tv, NULL); *sec = (ulint) tv.tv_sec; *ms = (ulint) tv.tv_usec; -#endif } /************************************************************** diff --git a/storage/myisam/CMakeLists.txt b/storage/myisam/CMakeLists.txt index e1337b2e2ac..9d91bf0560a 100644..100755 --- a/storage/myisam/CMakeLists.txt +++ b/storage/myisam/CMakeLists.txt @@ -12,6 +12,7 @@ # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA +INCLUDE("${PROJECT_SOURCE_DIR}/win/mysql_manifest.cmake") SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX") SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX") @@ -20,7 +21,8 @@ INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include ${CMAKE_SOURCE_DIR}/zlib ${CMAKE_SOURCE_DIR}/sql ${CMAKE_SOURCE_DIR}/regex ${CMAKE_SOURCE_DIR}/extra/yassl/include) -ADD_LIBRARY(myisam ft_boolean_search.c ft_nlq_search.c ft_parser.c ft_static.c ft_stem.c + +SET(MYISAM_SOURCES ft_boolean_search.c ft_nlq_search.c ft_parser.c ft_static.c ft_stem.c ha_myisam.cc ft_stopwords.c ft_update.c mi_cache.c mi_changed.c mi_check.c mi_checksum.c mi_close.c mi_create.c mi_dbug.c mi_delete.c @@ -32,14 +34,27 @@ ADD_LIBRARY(myisam ft_boolean_search.c ft_nlq_search.c ft_parser.c ft_static.c f mi_unique.c mi_update.c mi_write.c rt_index.c rt_key.c rt_mbr.c rt_split.c sort.c sp_key.c ft_eval.h myisamdef.h rt_index.h mi_rkey.c) -ADD_EXECUTABLE(myisam_ftdump myisam_ftdump.c) -TARGET_LINK_LIBRARIES(myisam_ftdump myisam mysys dbug strings zlib wsock32) +IF(NOT SOURCE_SUBLIBS) + + ADD_LIBRARY(myisam ${MYISAM_SOURCES}) + + ADD_EXECUTABLE(myisam_ftdump myisam_ftdump.c) + TARGET_LINK_LIBRARIES(myisam_ftdump myisam mysys debug dbug strings zlib wsock32) + + ADD_EXECUTABLE(myisamchk myisamchk.c) + TARGET_LINK_LIBRARIES(myisamchk myisam mysys debug dbug strings zlib wsock32) + + ADD_EXECUTABLE(myisamlog myisamlog.c) + TARGET_LINK_LIBRARIES(myisamlog 
myisam mysys debug dbug strings zlib wsock32) -ADD_EXECUTABLE(myisamchk myisamchk.c) -TARGET_LINK_LIBRARIES(myisamchk myisam mysys dbug strings zlib wsock32) + ADD_EXECUTABLE(myisampack myisampack.c) + TARGET_LINK_LIBRARIES(myisampack myisam mysys debug dbug strings zlib wsock32) -ADD_EXECUTABLE(myisamlog myisamlog.c) -TARGET_LINK_LIBRARIES(myisamlog myisam mysys dbug strings zlib wsock32) + IF(EMBED_MANIFESTS) + MYSQL_EMBED_MANIFEST("myisam_ftdump" "asInvoker") + MYSQL_EMBED_MANIFEST("myisamchk" "asInvoker") + MYSQL_EMBED_MANIFEST("myisamlog" "asInvoker") + MYSQL_EMBED_MANIFEST("myisampack" "asInvoker") + ENDIF(EMBED_MANIFESTS) -ADD_EXECUTABLE(myisampack myisampack.c) -TARGET_LINK_LIBRARIES(myisampack myisam mysys dbug strings zlib wsock32) +ENDIF(NOT SOURCE_SUBLIBS) diff --git a/storage/myisam/ft_boolean_search.c b/storage/myisam/ft_boolean_search.c index 68395d8abef..15f4e1e1d34 100644 --- a/storage/myisam/ft_boolean_search.c +++ b/storage/myisam/ft_boolean_search.c @@ -23,8 +23,14 @@ inside plus subtree. max_docid could be used by any word in plus subtree, but it could be updated by plus-word only. + Fulltext "smarter index merge" optimization assumes that rows + it gets are ordered by doc_id. That is not the case when we + search for a word with truncation operator. It may return + rows in random order. Thus we may not use "smarter index merge" + optimization with "trunc-words". + The idea is: there is no need to search for docid smaller than - biggest docid inside current plus subtree. + biggest docid inside current plus subtree or any upper plus subtree. Examples: +word1 word2 @@ -36,6 +42,13 @@ +(word1 -word2) +(+word3 word4) share same max_docid max_docid updated by word3 + +word1 word2 (+word3 word4 (+word5 word6)) + three subexpressions (including the top-level one), + every one has its own max_docid, updated by its plus word. + but for the search word6 uses + max(word1.max_docid, word3.max_docid, word5.max_docid), + while word4 uses, accordingly, + max(word1.max_docid, word3.max_docid). */ #define FT_CORE @@ -104,14 +117,14 @@ typedef struct st_ftb_word /* ^^^^^^^^^^^^^^^^^^ FTB_{EXPR,WORD} common section */ my_off_t docid[2]; /* for index search and for scan */ my_off_t key_root; - my_off_t *max_docid; + FTB_EXPR *max_docid_expr; MI_KEYDEF *keyinfo; struct st_ftb_word *prev; float weight; uint ndepth; uint len; uchar off; - byte word[1]; + uchar word[1]; } FTB_WORD; typedef struct st_ft_info @@ -161,7 +174,7 @@ typedef struct st_my_ftb_param { FTB *ftb; FTB_EXPR *ftbe; - byte *up_quot; + uchar *up_quot; uint depth; } MY_FTB_PARAM; @@ -208,13 +221,13 @@ static int ftb_query_add_word(MYSQL_FTPARSER_PARAM *param, for (tmp_expr= ftb_param->ftbe; tmp_expr->up; tmp_expr= tmp_expr->up) if (! (tmp_expr->flags & FTB_FLAG_YES)) break; - ftbw->max_docid= &tmp_expr->max_docid; + ftbw->max_docid_expr= tmp_expr; /* fall through */ case FT_TOKEN_STOPWORD: if (! 
ftb_param->up_quot) break; phrase_word= (FT_WORD *)alloc_root(&ftb_param->ftb->mem_root, sizeof(FT_WORD)); tmp_element= (LIST *)alloc_root(&ftb_param->ftb->mem_root, sizeof(LIST)); - phrase_word->pos= word; + phrase_word->pos= (uchar*) word; phrase_word->len= word_len; tmp_element->data= (void *)phrase_word; ftb_param->ftbe->phrase= list_add(ftb_param->ftbe->phrase, tmp_element); @@ -240,7 +253,7 @@ static int ftb_query_add_word(MYSQL_FTPARSER_PARAM *param, if (info->yesno > 0) ftbe->up->ythresh++; ftb_param->ftbe= ftbe; ftb_param->depth++; - ftb_param->up_quot= info->quot; + ftb_param->up_quot= (uchar*) info->quot; break; case FT_TOKEN_RIGHT_PAREN: if (ftb_param->ftbe->document) @@ -274,20 +287,20 @@ static int ftb_parse_query_internal(MYSQL_FTPARSER_PARAM *param, MY_FTB_PARAM *ftb_param= param->mysql_ftparam; MYSQL_FTPARSER_BOOLEAN_INFO info; CHARSET_INFO *cs= ftb_param->ftb->charset; - char **start= &query; - char *end= query + len; + uchar **start= (uchar**) &query; + uchar *end= (uchar*) query + len; FT_WORD w; info.prev= ' '; info.quot= 0; while (ft_get_word(cs, start, end, &w, &info)) - param->mysql_add_word(param, w.pos, w.len, &info); + param->mysql_add_word(param, (char*) w.pos, w.len, &info); return(0); } -static void _ftb_parse_query(FTB *ftb, byte *query, uint len, - struct st_mysql_ftparser *parser) +static int _ftb_parse_query(FTB *ftb, uchar *query, uint len, + struct st_mysql_ftparser *parser) { MYSQL_FTPARSER_PARAM *param; MY_FTB_PARAM ftb_param; @@ -295,9 +308,9 @@ static void _ftb_parse_query(FTB *ftb, byte *query, uint len, DBUG_ASSERT(parser); if (ftb->state != UNINITIALIZED) - DBUG_VOID_RETURN; + DBUG_RETURN(0); if (! (param= ftparser_call_initializer(ftb->info, ftb->keynr, 0))) - DBUG_VOID_RETURN; + DBUG_RETURN(1); ftb_param.ftb= ftb; ftb_param.depth= 0; @@ -308,12 +321,11 @@ static void _ftb_parse_query(FTB *ftb, byte *query, uint len, param->mysql_add_word= ftb_query_add_word; param->mysql_ftparam= (void *)&ftb_param; param->cs= ftb->charset; - param->doc= query; + param->doc= (char*) query; param->length= len; param->flags= 0; param->mode= MYSQL_FTPARSER_FULL_BOOLEAN_INFO; - parser->parse(param); - DBUG_VOID_RETURN; + DBUG_RETURN(parser->parse(param)); } @@ -331,7 +343,7 @@ static int _ft2_search(FTB *ftb, FTB_WORD *ftbw, my_bool init_search) my_bool can_go_down; MI_INFO *info=ftb->info; uint off, extra=HA_FT_WLEN+info->s->base.rec_reflength; - byte *lastkey_buf=ftbw->word+ftbw->off; + uchar *lastkey_buf=ftbw->word+ftbw->off; LINT_INIT(off); if (ftbw->flags & FTB_FLAG_TRUNC) @@ -348,11 +360,17 @@ static int _ft2_search(FTB *ftb, FTB_WORD *ftbw, my_bool init_search) else { uint sflag= SEARCH_BIGGER; - if (ftbw->docid[0] < *ftbw->max_docid) + my_off_t max_docid=0; + FTB_EXPR *tmp; + + for (tmp= ftbw->max_docid_expr; tmp; tmp= tmp->up) + set_if_bigger(max_docid, tmp->max_docid); + + if (ftbw->docid[0] < max_docid) { sflag|= SEARCH_SAME; _mi_dpointer(info, (uchar *)(ftbw->word + ftbw->len + HA_FT_WLEN), - *ftbw->max_docid); + max_docid); } r=_mi_search(info, ftbw->keyinfo, (uchar*) lastkey_buf, USE_WHOLE_KEY, sflag, ftbw->key_root); @@ -431,8 +449,8 @@ static int _ft2_search(FTB *ftb, FTB_WORD *ftbw, my_bool init_search) memcpy(lastkey_buf+off, info->lastkey, info->lastkey_length); } ftbw->docid[0]=info->lastpos; - if (ftbw->flags & FTB_FLAG_YES) - *ftbw->max_docid= info->lastpos; + if (ftbw->flags & FTB_FLAG_YES && !(ftbw->flags & FTB_FLAG_TRUNC)) + ftbw->max_docid_expr->max_docid= info->lastpos; return 0; } @@ -475,7 +493,8 @@ static void 
_ftb_init_index_search(FT_INFO *ftb) ftbe->up->flags|= FTB_FLAG_TRUNC, ftbe=ftbe->up) { if (ftbe->flags & FTB_FLAG_NO || /* 2 */ - ftbe->up->ythresh - ftbe->up->yweaks >1) /* 1 */ + ftbe->up->ythresh - ftbe->up->yweaks > + (uint) test(ftbe->flags & FTB_FLAG_YES)) /* 1 */ { FTB_EXPR *top_ftbe=ftbe->up; ftbw->docid[0]=HA_OFFSET_ERROR; @@ -505,7 +524,7 @@ static void _ftb_init_index_search(FT_INFO *ftb) } -FT_INFO * ft_init_boolean_search(MI_INFO *info, uint keynr, byte *query, +FT_INFO * ft_init_boolean_search(MI_INFO *info, uint keynr, uchar *query, uint query_len, CHARSET_INFO *cs) { FTB *ftb; @@ -538,21 +557,22 @@ FT_INFO * ft_init_boolean_search(MI_INFO *info, uint keynr, byte *query, ftbe->phrase= NULL; ftbe->document= 0; ftb->root=ftbe; - _ftb_parse_query(ftb, query, query_len, keynr == NO_SUCH_KEY ? - &ft_default_parser : - info->s->keyinfo[keynr].parser); + if (unlikely(_ftb_parse_query(ftb, query, query_len, + keynr == NO_SUCH_KEY ? &ft_default_parser : + info->s->keyinfo[keynr].parser))) + goto err; /* Hack: instead of init_queue, we'll use reinit queue to be able to alloc queue with alloc_root() */ - if (! (ftb->queue.root= (byte **)alloc_root(&ftb->mem_root, + if (! (ftb->queue.root= (uchar **)alloc_root(&ftb->mem_root, (ftb->queue.max_elements + 1) * sizeof(void *)))) goto err; reinit_queue(&ftb->queue, ftb->queue.max_elements, 0, 0, - (int (*)(void*, byte*, byte*))FTB_WORD_cmp, 0); + (int (*)(void*, uchar*, uchar*))FTB_WORD_cmp, 0); for (ftbw= ftb->last_word; ftbw; ftbw= ftbw->prev) - queue_insert(&ftb->queue, (byte *)ftbw); + queue_insert(&ftb->queue, (uchar *)ftbw); ftb->list=(FTB_WORD **)alloc_root(&ftb->mem_root, sizeof(FTB_WORD *)*ftb->queue.elements); memcpy(ftb->list, ftb->queue.root+1, sizeof(FTB_WORD *)*ftb->queue.elements); @@ -563,7 +583,7 @@ FT_INFO * ft_init_boolean_search(MI_INFO *info, uint keynr, byte *query, return ftb; err: free_root(& ftb->mem_root, MYF(0)); - my_free((gptr)ftb,MYF(0)); + my_free((uchar*)ftb,MYF(0)); return 0; } @@ -586,7 +606,7 @@ static int ftb_phrase_add_word(MYSQL_FTPARSER_PARAM *param, MY_FTB_PHRASE_PARAM *phrase_param= param->mysql_ftparam; FT_WORD *w= (FT_WORD *)phrase_param->document->data; LIST *phrase, *document; - w->pos= word; + w->pos= (uchar*) word; w->len= word_len; phrase_param->document= phrase_param->document->prev; if (phrase_param->phrase_length > phrase_param->document_length) @@ -616,12 +636,13 @@ static int ftb_check_phrase_internal(MYSQL_FTPARSER_PARAM *param, { FT_WORD word; MY_FTB_PHRASE_PARAM *phrase_param= param->mysql_ftparam; - const char *docend= document + len; - while (ft_simple_get_word(phrase_param->cs, &document, docend, &word, FALSE)) + const uchar *docend= (uchar*) document + len; + while (ft_simple_get_word(phrase_param->cs, (uchar**) &document, docend, + &word, FALSE)) { - param->mysql_add_word(param, word.pos, word.len, 0); + param->mysql_add_word(param, (char*) word.pos, word.len, 0); if (phrase_param->match) - return 1; + break; } return 0; } @@ -639,9 +660,10 @@ static int ftb_check_phrase_internal(MYSQL_FTPARSER_PARAM *param, RETURN VALUE 1 is returned if phrase found, 0 else. + -1 is returned if error occurs. 
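Callers must therefore distinguish a parser failure from a plain non-match. _ftb_climb_the_tree does so under the new int-returning contract; the shape, matching the hunk below, is:

	found = _ftb_check_phrase(ftb, ftsi.pos, ftsi.len, ftbe, parser);
	if (unlikely(found < 0))
		return 1;	/* error; ft_boolean_read_next surfaces it
				as HA_ERR_OUT_OF_MEM */
	if (!found)
		break;		/* phrase not present in this document */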
*/ -static int _ftb_check_phrase(FTB *ftb, const byte *document, uint len, +static int _ftb_check_phrase(FTB *ftb, const uchar *document, uint len, FTB_EXPR *ftbe, struct st_mysql_ftparser *parser) { MY_FTB_PHRASE_PARAM ftb_param; @@ -663,21 +685,22 @@ static int _ftb_check_phrase(FTB *ftb, const byte *document, uint len, param->mysql_add_word= ftb_phrase_add_word; param->mysql_ftparam= (void *)&ftb_param; param->cs= ftb->charset; - param->doc= (byte *)document; + param->doc= (char *) document; param->length= len; param->flags= 0; param->mode= MYSQL_FTPARSER_WITH_STOPWORDS; - parser->parse(param); + if (unlikely(parser->parse(param))) + return -1; DBUG_RETURN(ftb_param.match ? 1 : 0); } -static void _ftb_climb_the_tree(FTB *ftb, FTB_WORD *ftbw, FT_SEG_ITERATOR *ftsi_orig) +static int _ftb_climb_the_tree(FTB *ftb, FTB_WORD *ftbw, FT_SEG_ITERATOR *ftsi_orig) { FT_SEG_ITERATOR ftsi; FTB_EXPR *ftbe; float weight=ftbw->weight; - int yn=ftbw->flags, ythresh, mode=(ftsi_orig != 0); + int yn_flag= ftbw->flags, ythresh, mode=(ftsi_orig != 0); my_off_t curdoc=ftbw->docid[mode]; struct st_mysql_ftparser *parser= ftb->keynr == NO_SUCH_KEY ? &ft_default_parser : @@ -694,41 +717,43 @@ static void _ftb_climb_the_tree(FTB *ftb, FTB_WORD *ftbw, FT_SEG_ITERATOR *ftsi_ } if (ftbe->nos) break; - if (yn & FTB_FLAG_YES) + if (yn_flag & FTB_FLAG_YES) { weight /= ftbe->ythresh; ftbe->cur_weight += weight; if ((int) ++ftbe->yesses == ythresh) { - yn=ftbe->flags; + yn_flag=ftbe->flags; weight=ftbe->cur_weight*ftbe->weight; if (mode && ftbe->phrase) { - int not_found=1; + int found= 0; memcpy(&ftsi, ftsi_orig, sizeof(ftsi)); - while (_mi_ft_segiterator(&ftsi) && not_found) + while (_mi_ft_segiterator(&ftsi) && !found) { if (!ftsi.pos) continue; - not_found = ! _ftb_check_phrase(ftb, ftsi.pos, ftsi.len, - ftbe, parser); + found= _ftb_check_phrase(ftb, ftsi.pos, ftsi.len, ftbe, parser); + if (unlikely(found < 0)) + return 1; } - if (not_found) break; + if (!found) + break; } /* ftbe->quot */ } else break; } else - if (yn & FTB_FLAG_NO) + if (yn_flag & FTB_FLAG_NO) { /* NOTE: special sort function of queue assures that all - (yn & FTB_FLAG_NO) != 0 + (yn_flag & FTB_FLAG_NO) != 0 events for every particular subexpression will "auto-magically" happen BEFORE all the - (yn & FTB_FLAG_YES) != 0 events. So no + (yn_flag & FTB_FLAG_YES) != 0 events. So no already matched expression can become not-matched again. */ ++ftbe->nos; @@ -741,11 +766,12 @@ static void _ftb_climb_the_tree(FTB *ftb, FTB_WORD *ftbw, FT_SEG_ITERATOR *ftsi_ ftbe->cur_weight += weight; if ((int) ftbe->yesses < ythresh) break; - if (!(yn & FTB_FLAG_WONLY)) - yn= ((int) ftbe->yesses++ == ythresh) ? ftbe->flags : FTB_FLAG_WONLY ; + if (!(yn_flag & FTB_FLAG_WONLY)) + yn_flag= ((int) ftbe->yesses++ == ythresh) ? 
ftbe->flags : FTB_FLAG_WONLY ; weight*= ftbe->weight; } } + return 0; } @@ -778,7 +804,11 @@ int ft_boolean_read_next(FT_INFO *ftb, char *record) { while (curdoc == (ftbw=(FTB_WORD *)queue_top(& ftb->queue))->docid[0]) { - _ftb_climb_the_tree(ftb, ftbw, 0); + if (unlikely(_ftb_climb_the_tree(ftb, ftbw, 0))) + { + my_errno= HA_ERR_OUT_OF_MEM; + goto err; + } /* update queue */ _ft2_search(ftb, ftbw, 0); @@ -800,10 +830,11 @@ int ft_boolean_read_next(FT_INFO *ftb, char *record) /* Clear all states, except that the table was updated */ info->update&= (HA_STATE_CHANGED | HA_STATE_ROW_CHANGED); - if (!(*info->read_record)(info,curdoc,record)) + if (!(*info->read_record)(info,curdoc, (uchar*) record)) { info->update|= HA_STATE_AKTIV; /* Record is read */ - if (ftb->with_scan && ft_boolean_find_relevance(ftb,record,0)==0) + if (ftb->with_scan && + ft_boolean_find_relevance(ftb,(uchar*) record,0)==0) continue; /* no match */ my_errno=0; goto err; @@ -854,7 +885,8 @@ static int ftb_find_relevance_add_word(MYSQL_FTPARSER_PARAM *param, if (ftbw->docid[1] == ftb->info->lastpos) continue; ftbw->docid[1]= ftb->info->lastpos; - _ftb_climb_the_tree(ftb, ftbw, ftb_param->ftsi); + if (unlikely(_ftb_climb_the_tree(ftb, ftbw, ftb_param->ftsi))) + return 1; } return(0); } @@ -865,15 +897,15 @@ static int ftb_find_relevance_parse(MYSQL_FTPARSER_PARAM *param, { MY_FTB_FIND_PARAM *ftb_param= param->mysql_ftparam; FT_INFO *ftb= ftb_param->ftb; - char *end= doc + len; + uchar *end= (uchar*) doc + len; FT_WORD w; - while (ft_simple_get_word(ftb->charset, &doc, end, &w, TRUE)) - param->mysql_add_word(param, w.pos, w.len, 0); + while (ft_simple_get_word(ftb->charset, (uchar**) &doc, end, &w, TRUE)) + param->mysql_add_word(param, (char*) w.pos, w.len, 0); return(0); } -float ft_boolean_find_relevance(FT_INFO *ftb, byte *record, uint length) +float ft_boolean_find_relevance(FT_INFO *ftb, uchar *record, uint length) { FTB_EXPR *ftbe; FT_SEG_ITERATOR ftsi, ftsi2; @@ -924,9 +956,10 @@ float ft_boolean_find_relevance(FT_INFO *ftb, byte *record, uint length) { if (!ftsi.pos) continue; - param->doc= (byte *)ftsi.pos; + param->doc= (char *)ftsi.pos; param->length= ftsi.len; - parser->parse(param); + if (unlikely(parser->parse(param))) + return 0; } ftbe=ftb->root; if (ftbe->docid[1]==docid && ftbe->cur_weight>0 && @@ -948,7 +981,7 @@ void ft_boolean_close_search(FT_INFO *ftb) delete_tree(& ftb->no_dupes); } free_root(& ftb->mem_root, MYF(0)); - my_free((gptr)ftb,MYF(0)); + my_free((uchar*)ftb,MYF(0)); } diff --git a/storage/myisam/ft_nlq_search.c b/storage/myisam/ft_nlq_search.c index 5c6f66897ee..282fa6751d8 100644 --- a/storage/myisam/ft_nlq_search.c +++ b/storage/myisam/ft_nlq_search.c @@ -83,7 +83,7 @@ static int walk_and_match(FT_WORD *word, uint32 count, ALL_IN_ONE *aio) word->weight=LWS_FOR_QUERY; - keylen=_ft_make_key(info,aio->keynr,(char*) keybuff,word,0); + keylen=_ft_make_key(info,aio->keynr,keybuff,word,0); keylen-=HA_FT_WLEN; doc_cnt=0; @@ -189,7 +189,7 @@ static int walk_and_push(FT_SUPERDOC *from, DBUG_ENTER("walk_and_copy"); from->doc.weight+=from->tmp_weight*from->word_ptr->weight; set_if_smaller(best->elements, ft_query_expansion_limit-1); - queue_insert(best, (byte *)& from->doc); + queue_insert(best, (uchar *)& from->doc); DBUG_RETURN(0); } @@ -201,8 +201,8 @@ static int FT_DOC_cmp(void *unused __attribute__((unused)), } -FT_INFO *ft_init_nlq_search(MI_INFO *info, uint keynr, byte *query, - uint query_len, uint flags, byte *record) +FT_INFO *ft_init_nlq_search(MI_INFO *info, uint keynr, uchar *query, + 
uint query_len, uint flags, uchar *record) { TREE wtree; ALL_IN_ONE aio; @@ -257,8 +257,12 @@ FT_INFO *ft_init_nlq_search(MI_INFO *info, uint keynr, byte *query, { info->update|= HA_STATE_AKTIV; ftparser_param->flags= MYSQL_FTFLAGS_NEED_COPY; - _mi_ft_parse(&wtree, info, keynr, record, ftparser_param, - &wtree.mem_root); + if (unlikely(_mi_ft_parse(&wtree, info, keynr, record, ftparser_param, + &wtree.mem_root))) + { + delete_queue(&best); + goto err; + } } } delete_queue(&best); @@ -313,7 +317,7 @@ int ft_nlq_read_next(FT_INFO *handler, char *record) info->update&= (HA_STATE_CHANGED | HA_STATE_ROW_CHANGED); info->lastpos=handler->doc[handler->curdoc].dpos; - if (!(*info->read_record)(info,info->lastpos,record)) + if (!(*info->read_record)(info,info->lastpos,(uchar*) record)) { info->update|= HA_STATE_AKTIV; /* Record is read */ return 0; @@ -323,7 +327,7 @@ int ft_nlq_read_next(FT_INFO *handler, char *record) float ft_nlq_find_relevance(FT_INFO *handler, - byte *record __attribute__((unused)), + uchar *record __attribute__((unused)), uint length __attribute__((unused))) { int a,b,c; @@ -352,7 +356,7 @@ float ft_nlq_find_relevance(FT_INFO *handler, void ft_nlq_close_search(FT_INFO *handler) { - my_free((gptr)handler,MYF(0)); + my_free((uchar*)handler,MYF(0)); } diff --git a/storage/myisam/ft_parser.c b/storage/myisam/ft_parser.c index 5992d9c118e..df2423aa50f 100644 --- a/storage/myisam/ft_parser.c +++ b/storage/myisam/ft_parser.c @@ -78,12 +78,12 @@ FT_WORD * ft_linearize(TREE *wtree, MEM_ROOT *mem_root) DBUG_RETURN(wlist); } -my_bool ft_boolean_check_syntax_string(const byte *str) +my_bool ft_boolean_check_syntax_string(const uchar *str) { uint i, j; if (!str || - (strlen(str)+1 != sizeof(ft_boolean_syntax)) || + (strlen((char*) str)+1 != sizeof(ft_boolean_syntax)) || (str[0] != ' ' && str[1] != ' ')) return 1; for (i=0; i<sizeof(ft_boolean_syntax); i++) @@ -106,12 +106,13 @@ my_bool ft_boolean_check_syntax_string(const byte *str) 3 - right bracket 4 - stopword found */ -byte ft_get_word(CHARSET_INFO *cs, byte **start, byte *end, - FT_WORD *word, MYSQL_FTPARSER_BOOLEAN_INFO *param) +uchar ft_get_word(CHARSET_INFO *cs, uchar **start, uchar *end, + FT_WORD *word, MYSQL_FTPARSER_BOOLEAN_INFO *param) { - byte *doc=*start; + uchar *doc=*start; int ctype; - uint mwc, length, mbl; + uint mwc, length; + int mbl; param->yesno=(FTB_YES==' ') ? 1 : (param->quot != 0); param->weight_adjust= param->wasign= 0; @@ -119,14 +120,14 @@ byte ft_get_word(CHARSET_INFO *cs, byte **start, byte *end, while (doc<end) { - for (; doc < end; doc+= (mbl > 0 ? mbl : 1)) + for (; doc < end; doc+= (mbl > 0 ? mbl : (mbl < 0 ? -mbl : 1))) { mbl= cs->cset->ctype(cs, &ctype, (uchar*)doc, (uchar*)end); if (true_word_char(ctype, *doc)) break; if (*doc == FTB_RQUOT && param->quot) { - param->quot=doc; + param->quot= (char*) doc; *start=doc+1; param->type= FT_TOKEN_RIGHT_PAREN; goto ret; @@ -137,7 +138,8 @@ byte ft_get_word(CHARSET_INFO *cs, byte **start, byte *end, { /* param->prev=' '; */ *start=doc+1; - if (*doc == FTB_LQUOT) param->quot=*start; + if (*doc == FTB_LQUOT) + param->quot= (char*) *start; param->type= (*doc == FTB_RBR ? FT_TOKEN_RIGHT_PAREN : FT_TOKEN_LEFT_PAREN); goto ret; } @@ -157,7 +159,8 @@ byte ft_get_word(CHARSET_INFO *cs, byte **start, byte *end, } mwc=length=0; - for (word->pos= doc; doc < end; length++, doc+= (mbl > 0 ? mbl : 1)) + for (word->pos= doc; doc < end; length++, + doc+= (mbl > 0 ? mbl : (mbl < 0 ? 
-mbl : 1))) { mbl= cs->cset->ctype(cs, &ctype, (uchar*)doc, (uchar*)end); if (true_word_char(ctype, *doc)) @@ -172,7 +175,8 @@ byte ft_get_word(CHARSET_INFO *cs, byte **start, byte *end, if ((param->trunc=(doc<end && *doc == FTB_TRUNC))) doc++; - if (((length >= ft_min_word_len && !is_stopword(word->pos, word->len)) + if (((length >= ft_min_word_len && !is_stopword((char*) word->pos, + word->len)) || param->trunc) && length < ft_max_word_len) { *start=doc; @@ -188,7 +192,8 @@ byte ft_get_word(CHARSET_INFO *cs, byte **start, byte *end, } if (param->quot) { - param->quot=*start=doc; + *start= doc; + param->quot= (char*) doc; param->type= 3; /* FT_RBR */ goto ret; } @@ -196,17 +201,18 @@ ret: return param->type; } -byte ft_simple_get_word(CHARSET_INFO *cs, byte **start, const byte *end, - FT_WORD *word, my_bool skip_stopwords) +uchar ft_simple_get_word(CHARSET_INFO *cs, uchar **start, const uchar *end, + FT_WORD *word, my_bool skip_stopwords) { - byte *doc= *start; - uint mwc, length, mbl; + uchar *doc= *start; + uint mwc, length; + int mbl; int ctype; DBUG_ENTER("ft_simple_get_word"); do { - for (;; doc+= (mbl > 0 ? mbl : 1)) + for (;; doc+= (mbl > 0 ? mbl : (mbl < 0 ? -mbl : 1))) { if (doc >= end) DBUG_RETURN(0); @@ -216,7 +222,8 @@ byte ft_simple_get_word(CHARSET_INFO *cs, byte **start, const byte *end, } mwc= length= 0; - for (word->pos= doc; doc < end; length++, doc+= (mbl > 0 ? mbl : 1)) + for (word->pos= doc; doc < end; length++, + doc+= (mbl > 0 ? mbl : (mbl < 0 ? -mbl : 1))) { mbl= cs->cset->ctype(cs, &ctype, (uchar*)doc, (uchar*)end); if (true_word_char(ctype, *doc)) @@ -231,7 +238,7 @@ byte ft_simple_get_word(CHARSET_INFO *cs, byte **start, const byte *end, if (skip_stopwords == FALSE || (length >= ft_min_word_len && length < ft_max_word_len && - !is_stopword(word->pos, word->len))) + !is_stopword((char*) word->pos, word->len))) { *start= doc; DBUG_RETURN(1); @@ -260,14 +267,14 @@ static int ft_add_word(MYSQL_FTPARSER_PARAM *param, wtree= ft_param->wtree; if (param->flags & MYSQL_FTFLAGS_NEED_COPY) { - byte *ptr; + uchar *ptr; DBUG_ASSERT(wtree->with_delete == 0); - ptr= (byte *)alloc_root(ft_param->mem_root, word_len); + ptr= (uchar *)alloc_root(ft_param->mem_root, word_len); memcpy(ptr, word, word_len); w.pos= ptr; } else - w.pos= word; + w.pos= (uchar*) word; w.len= word_len; if (!tree_insert(wtree, &w, 0, wtree->custom_arg)) { @@ -279,24 +286,25 @@ static int ft_add_word(MYSQL_FTPARSER_PARAM *param, static int ft_parse_internal(MYSQL_FTPARSER_PARAM *param, - char *doc, int doc_len) + char *doc_arg, int doc_len) { - byte *end=doc+doc_len; + uchar *doc= (uchar*) doc_arg; + uchar *end= doc + doc_len; MY_FT_PARSER_PARAM *ft_param=param->mysql_ftparam; TREE *wtree= ft_param->wtree; FT_WORD w; DBUG_ENTER("ft_parse_internal"); while (ft_simple_get_word(wtree->custom_arg, &doc, end, &w, TRUE)) - if (param->mysql_add_word(param, w.pos, w.len, 0)) + if (param->mysql_add_word(param, (char*) w.pos, w.len, 0)) DBUG_RETURN(1); DBUG_RETURN(0); } -int ft_parse(TREE *wtree, byte *doc, int doclen, - struct st_mysql_ftparser *parser, - MYSQL_FTPARSER_PARAM *param, MEM_ROOT *mem_root) +int ft_parse(TREE *wtree, uchar *doc, int doclen, + struct st_mysql_ftparser *parser, + MYSQL_FTPARSER_PARAM *param, MEM_ROOT *mem_root) { MY_FT_PARSER_PARAM my_param; DBUG_ENTER("ft_parse"); @@ -309,7 +317,7 @@ int ft_parse(TREE *wtree, byte *doc, int doclen, param->mysql_add_word= ft_add_word; param->mysql_ftparam= &my_param; param->cs= wtree->custom_arg; - param->doc= doc; + param->doc= (char*) doc; 
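/*
  Sketch, not part of the patch: the advance rule that ft_get_word() and
  ft_simple_get_word() now apply after each cs->cset->ctype() call,
  factored out here for readability. The semantics are assumed from the
  guard itself: a positive result is the byte length of a well-formed
  character, a negative result marks a broken or truncated multi-byte
  sequence whose magnitude is taken as the skip distance, and anything
  else falls back to one byte so the scan always makes progress.
*/
static uint ft_char_advance(int mbl)
{
  if (mbl > 0)
    return (uint) mbl;   /* well-formed character: skip all of its bytes */
  if (mbl < 0)
    return (uint) -mbl;  /* bad sequence: skip it as a whole, not byte-wise */
  return 1;              /* defensive fallback: always advance */
}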
param->length= doclen; param->mode= MYSQL_FTPARSER_SIMPLE_MODE; DBUG_RETURN(parser->parse(param)); @@ -387,7 +395,9 @@ MYSQL_FTPARSER_PARAM *ftparser_call_initializer(MI_INFO *info, mysql_add_word == 0 - parser is not initialized mysql_add_word != 0 - parser is initialized, or no initialization needed. */ - info->ftparser_param[ftparser_nr].mysql_add_word= (void *)1; + info->ftparser_param[ftparser_nr].mysql_add_word= + (int (*)(struct st_mysql_ftparser_param *, char *, int, + MYSQL_FTPARSER_BOOLEAN_INFO *)) 1; if (parser->init && parser->init(&info->ftparser_param[ftparser_nr])) return 0; } diff --git a/storage/myisam/ft_static.c b/storage/myisam/ft_static.c index 34608be1721..610c20eede6 100644 --- a/storage/myisam/ft_static.c +++ b/storage/myisam/ft_static.c @@ -56,8 +56,8 @@ const struct _ft_vft _ft_vft_boolean = { FT_INFO *ft_init_search(uint flags, void *info, uint keynr, - byte *query, uint query_len, CHARSET_INFO *cs, - byte *record) + uchar *query, uint query_len, CHARSET_INFO *cs, + uchar *record) { FT_INFO *res; if (flags & FT_BOOL) diff --git a/storage/myisam/ft_stopwords.c b/storage/myisam/ft_stopwords.c index 63732ebadc9..59866d9a351 100644 --- a/storage/myisam/ft_stopwords.c +++ b/storage/myisam/ft_stopwords.c @@ -38,7 +38,7 @@ static void FT_STOPWORD_free(FT_STOPWORD *w, TREE_FREE action, void *arg __attribute__((unused))) { if (action == free_free) - my_free((gptr) w->pos, MYF(0)); + my_free((uchar*) w->pos, MYF(0)); } static int ft_add_stopword(const char *w) @@ -65,7 +65,7 @@ int ft_init_stopwords() { File fd; uint len; - byte *buffer, *start, *end; + uchar *buffer, *start, *end; FT_WORD w; int error=-1; @@ -82,7 +82,7 @@ int ft_init_stopwords() end=start+len; while (ft_simple_get_word(default_charset_info, &start, end, &w, TRUE)) { - if (ft_add_stopword(my_strndup(w.pos, w.len, MYF(0)))) + if (ft_add_stopword(my_strndup((char*) w.pos, w.len, MYF(0)))) goto err1; } error=0; diff --git a/storage/myisam/ft_update.c b/storage/myisam/ft_update.c index e176d550b1d..e3e4c62158f 100644 --- a/storage/myisam/ft_update.c +++ b/storage/myisam/ft_update.c @@ -20,7 +20,7 @@ #include "ftdefs.h" #include <math.h> -void _mi_ft_segiterator_init(MI_INFO *info, uint keynr, const byte *record, +void _mi_ft_segiterator_init(MI_INFO *info, uint keynr, const uchar *record, FT_SEG_ITERATOR *ftsi) { DBUG_ENTER("_mi_ft_segiterator_init"); @@ -31,7 +31,7 @@ void _mi_ft_segiterator_init(MI_INFO *info, uint keynr, const byte *record, DBUG_VOID_RETURN; } -void _mi_ft_segiterator_dummy_init(const byte *record, uint len, +void _mi_ft_segiterator_dummy_init(const uchar *record, uint len, FT_SEG_ITERATOR *ftsi) { DBUG_ENTER("_mi_ft_segiterator_dummy_init"); @@ -94,7 +94,7 @@ uint _mi_ft_segiterator(register FT_SEG_ITERATOR *ftsi) /* parses a document i.e. 
calls ft_parse for every keyseg */ -uint _mi_ft_parse(TREE *parsed, MI_INFO *info, uint keynr, const byte *record, +uint _mi_ft_parse(TREE *parsed, MI_INFO *info, uint keynr, const uchar *record, MYSQL_FTPARSER_PARAM *param, MEM_ROOT *mem_root) { FT_SEG_ITERATOR ftsi; @@ -108,13 +108,13 @@ uint _mi_ft_parse(TREE *parsed, MI_INFO *info, uint keynr, const byte *record, while (_mi_ft_segiterator(&ftsi)) { if (ftsi.pos) - if (ft_parse(parsed, (byte *)ftsi.pos, ftsi.len, parser, param, mem_root)) + if (ft_parse(parsed, (uchar *)ftsi.pos, ftsi.len, parser, param, mem_root)) DBUG_RETURN(1); } DBUG_RETURN(0); } -FT_WORD *_mi_ft_parserecord(MI_INFO *info, uint keynr, const byte *record, +FT_WORD *_mi_ft_parserecord(MI_INFO *info, uint keynr, const uchar *record, MEM_ROOT *mem_root) { TREE ptree; @@ -130,7 +130,7 @@ FT_WORD *_mi_ft_parserecord(MI_INFO *info, uint keynr, const byte *record, DBUG_RETURN(ft_linearize(&ptree, mem_root)); } -static int _mi_ft_store(MI_INFO *info, uint keynr, byte *keybuf, +static int _mi_ft_store(MI_INFO *info, uint keynr, uchar *keybuf, FT_WORD *wlist, my_off_t filepos) { uint key_length; @@ -145,7 +145,7 @@ static int _mi_ft_store(MI_INFO *info, uint keynr, byte *keybuf, DBUG_RETURN(0); } -static int _mi_ft_erase(MI_INFO *info, uint keynr, byte *keybuf, +static int _mi_ft_erase(MI_INFO *info, uint keynr, uchar *keybuf, FT_WORD *wlist, my_off_t filepos) { uint key_length, err=0; @@ -168,7 +168,7 @@ static int _mi_ft_erase(MI_INFO *info, uint keynr, byte *keybuf, #define THOSE_TWO_DAMN_KEYS_ARE_REALLY_DIFFERENT 1 #define GEE_THEY_ARE_ABSOLUTELY_IDENTICAL 0 -int _mi_ft_cmp(MI_INFO *info, uint keynr, const byte *rec1, const byte *rec2) +int _mi_ft_cmp(MI_INFO *info, uint keynr, const uchar *rec1, const uchar *rec2) { FT_SEG_ITERATOR ftsi1, ftsi2; CHARSET_INFO *cs=info->s->keyinfo[keynr].seg->charset; @@ -190,8 +190,8 @@ int _mi_ft_cmp(MI_INFO *info, uint keynr, const byte *rec1, const byte *rec2) /* update a document entry */ -int _mi_ft_update(MI_INFO *info, uint keynr, byte *keybuf, - const byte *oldrec, const byte *newrec, my_off_t pos) +int _mi_ft_update(MI_INFO *info, uint keynr, uchar *keybuf, + const uchar *oldrec, const uchar *newrec, my_off_t pos) { int error= -1; FT_WORD *oldlist,*newlist, *old_word, *new_word; @@ -241,7 +241,7 @@ err: /* adds a document to the collection */ -int _mi_ft_add(MI_INFO *info, uint keynr, byte *keybuf, const byte *record, +int _mi_ft_add(MI_INFO *info, uint keynr, uchar *keybuf, const uchar *record, my_off_t pos) { int error= -1; @@ -260,7 +260,7 @@ int _mi_ft_add(MI_INFO *info, uint keynr, byte *keybuf, const byte *record, /* removes a document from the collection */ -int _mi_ft_del(MI_INFO *info, uint keynr, byte *keybuf, const byte *record, +int _mi_ft_del(MI_INFO *info, uint keynr, uchar *keybuf, const uchar *record, my_off_t pos) { int error= -1; @@ -276,10 +276,10 @@ int _mi_ft_del(MI_INFO *info, uint keynr, byte *keybuf, const byte *record, DBUG_RETURN(error); } -uint _ft_make_key(MI_INFO *info, uint keynr, byte *keybuf, FT_WORD *wptr, +uint _ft_make_key(MI_INFO *info, uint keynr, uchar *keybuf, FT_WORD *wptr, my_off_t filepos) { - byte buf[HA_FT_MAXBYTELEN+16]; + uchar buf[HA_FT_MAXBYTELEN+16]; DBUG_ENTER("_ft_make_key"); #if HA_FT_WTYPE == HA_KEYTYPE_FLOAT diff --git a/storage/myisam/ftdefs.h b/storage/myisam/ftdefs.h index 26f5e4f266e..22443807b87 100644 --- a/storage/myisam/ftdefs.h +++ b/storage/myisam/ftdefs.h @@ -96,44 +96,44 @@ #define FTB_RQUOT (ft_boolean_syntax[11]) typedef struct st_ft_word { - byte * pos; + uchar * 
pos; uint len; double weight; } FT_WORD; int is_stopword(char *word, uint len); -uint _ft_make_key(MI_INFO *, uint , byte *, FT_WORD *, my_off_t); +uint _ft_make_key(MI_INFO *, uint , uchar *, FT_WORD *, my_off_t); -byte ft_get_word(CHARSET_INFO *, byte **, byte *, FT_WORD *, - MYSQL_FTPARSER_BOOLEAN_INFO *); -byte ft_simple_get_word(CHARSET_INFO *, byte **, const byte *, - FT_WORD *, my_bool); +uchar ft_get_word(CHARSET_INFO *, uchar **, uchar *, FT_WORD *, + MYSQL_FTPARSER_BOOLEAN_INFO *); +uchar ft_simple_get_word(CHARSET_INFO *, uchar **, const uchar *, + FT_WORD *, my_bool); typedef struct _st_ft_seg_iterator { uint num, len; HA_KEYSEG *seg; - const byte *rec, *pos; + const uchar *rec, *pos; } FT_SEG_ITERATOR; -void _mi_ft_segiterator_init(MI_INFO *, uint, const byte *, FT_SEG_ITERATOR *); -void _mi_ft_segiterator_dummy_init(const byte *, uint, FT_SEG_ITERATOR *); +void _mi_ft_segiterator_init(MI_INFO *, uint, const uchar *, FT_SEG_ITERATOR *); +void _mi_ft_segiterator_dummy_init(const uchar *, uint, FT_SEG_ITERATOR *); uint _mi_ft_segiterator(FT_SEG_ITERATOR *); void ft_parse_init(TREE *, CHARSET_INFO *); -int ft_parse(TREE *, byte *, int, struct st_mysql_ftparser *parser, +int ft_parse(TREE *, uchar *, int, struct st_mysql_ftparser *parser, MYSQL_FTPARSER_PARAM *, MEM_ROOT *); FT_WORD * ft_linearize(TREE *, MEM_ROOT *); -FT_WORD * _mi_ft_parserecord(MI_INFO *, uint, const byte *, MEM_ROOT *); -uint _mi_ft_parse(TREE *, MI_INFO *, uint, const byte *, +FT_WORD * _mi_ft_parserecord(MI_INFO *, uint, const uchar *, MEM_ROOT *); +uint _mi_ft_parse(TREE *, MI_INFO *, uint, const uchar *, MYSQL_FTPARSER_PARAM *, MEM_ROOT *); -FT_INFO *ft_init_nlq_search(MI_INFO *, uint, byte *, uint, uint, byte *); -FT_INFO *ft_init_boolean_search(MI_INFO *, uint, byte *, uint, CHARSET_INFO *); +FT_INFO *ft_init_nlq_search(MI_INFO *, uint, uchar *, uint, uint, uchar *); +FT_INFO *ft_init_boolean_search(MI_INFO *, uint, uchar *, uint, CHARSET_INFO *); extern const struct _ft_vft _ft_vft_nlq; int ft_nlq_read_next(FT_INFO *, char *); -float ft_nlq_find_relevance(FT_INFO *, byte *, uint); +float ft_nlq_find_relevance(FT_INFO *, uchar *, uint); void ft_nlq_close_search(FT_INFO *); float ft_nlq_get_relevance(FT_INFO *); my_off_t ft_nlq_get_docid(FT_INFO *); @@ -141,7 +141,7 @@ void ft_nlq_reinit_search(FT_INFO *); extern const struct _ft_vft _ft_vft_boolean; int ft_boolean_read_next(FT_INFO *, char *); -float ft_boolean_find_relevance(FT_INFO *, byte *, uint); +float ft_boolean_find_relevance(FT_INFO *, uchar *, uint); void ft_boolean_close_search(FT_INFO *); float ft_boolean_get_relevance(FT_INFO *); my_off_t ft_boolean_get_docid(FT_INFO *); diff --git a/storage/myisam/fulltext.h b/storage/myisam/fulltext.h index bea2fa96969..856e93e034d 100644 --- a/storage/myisam/fulltext.h +++ b/storage/myisam/fulltext.h @@ -29,9 +29,9 @@ extern const HA_KEYSEG ft_keysegs[FT_SEGS]; -int _mi_ft_cmp(MI_INFO *, uint, const byte *, const byte *); -int _mi_ft_add(MI_INFO *, uint, byte *, const byte *, my_off_t); -int _mi_ft_del(MI_INFO *, uint, byte *, const byte *, my_off_t); +int _mi_ft_cmp(MI_INFO *, uint, const uchar *, const uchar *); +int _mi_ft_add(MI_INFO *, uint, uchar *, const uchar *, my_off_t); +int _mi_ft_del(MI_INFO *, uint, uchar *, const uchar *, my_off_t); uint _mi_ft_convert_to_ft2(MI_INFO *, uint, uchar *); diff --git a/storage/myisam/ha_myisam.cc b/storage/myisam/ha_myisam.cc index 397856a4a4e..bc6e5706c21 100644 --- a/storage/myisam/ha_myisam.cc +++ b/storage/myisam/ha_myisam.cc @@ -84,6 +84,14 @@ static void 
mi_check_print_msg(MI_CHECK *param, const char* msg_type, } length=(uint) (strxmov(name, param->db_name,".",param->table_name,NullS) - name); + /* + TODO: switch from protocol to push_warning here. The main reason we haven't + done it yet is parallel repair, which would break on the following trace: + mi_check_print_msg/push_warning/sql_alloc/my_pthread_getspecific_ptr. + + Also, we likely need to lock a mutex here (in both cases, with protocol and + with push_warning). + */ protocol->prepare_for_resend(); protocol->store(name, length, system_charset_info); protocol->store(param->op_name, system_charset_info); @@ -95,6 +103,343 @@ static void mi_check_print_msg(MI_CHECK *param, const char* msg_type, return; } + + +/* + Convert TABLE object to MyISAM key and column definition + + SYNOPSIS + table2myisam() + table_arg in TABLE object. + keydef_out out MyISAM key definition. + recinfo_out out MyISAM column definition. + records_out out Number of fields. + + DESCRIPTION + This function will allocate and initialize MyISAM key and column + definition for further use in mi_create, or for checking underlying + table conformance in the merge engine. + + RETURN VALUE + 0 OK + !0 error code +*/ + +int table2myisam(TABLE *table_arg, MI_KEYDEF **keydef_out, + MI_COLUMNDEF **recinfo_out, uint *records_out) +{ + uint i, j, recpos, minpos, fieldpos, temp_length, length; + enum ha_base_keytype type= HA_KEYTYPE_BINARY; + uchar *record; + KEY *pos; + MI_KEYDEF *keydef; + MI_COLUMNDEF *recinfo, *recinfo_pos; + HA_KEYSEG *keyseg; + TABLE_SHARE *share= table_arg->s; + uint options= share->db_options_in_use; + DBUG_ENTER("table2myisam"); + if (!(my_multi_malloc(MYF(MY_WME), + recinfo_out, (share->fields * 2 + 2) * sizeof(MI_COLUMNDEF), + keydef_out, share->keys * sizeof(MI_KEYDEF), + &keyseg, + (share->key_parts + share->keys) * sizeof(HA_KEYSEG), + NullS))) + DBUG_RETURN(HA_ERR_OUT_OF_MEM); /* purecov: inspected */ + keydef= *keydef_out; + recinfo= *recinfo_out; + pos= table_arg->key_info; + for (i= 0; i < share->keys; i++, pos++) + { + keydef[i].flag= (pos->flags & (HA_NOSAME | HA_FULLTEXT | HA_SPATIAL)); + keydef[i].key_alg= pos->algorithm == HA_KEY_ALG_UNDEF ? + (pos->flags & HA_SPATIAL ?
HA_KEY_ALG_RTREE : HA_KEY_ALG_BTREE) : + pos->algorithm; + keydef[i].block_length= pos->block_size; + keydef[i].seg= keyseg; + keydef[i].keysegs= pos->key_parts; + for (j= 0; j < pos->key_parts; j++) + { + Field *field= pos->key_part[j].field; + type= field->key_type(); + keydef[i].seg[j].flag= pos->key_part[j].key_part_flag; + + if (options & HA_OPTION_PACK_KEYS || + (pos->flags & (HA_PACK_KEY | HA_BINARY_PACK_KEY | + HA_SPACE_PACK_USED))) + { + if (pos->key_part[j].length > 8 && + (type == HA_KEYTYPE_TEXT || + type == HA_KEYTYPE_NUM || + (type == HA_KEYTYPE_BINARY && !field->zero_pack()))) + { + /* No blobs here */ + if (j == 0) + keydef[i].flag|= HA_PACK_KEY; + if (!(field->flags & ZEROFILL_FLAG) && + (field->type() == MYSQL_TYPE_STRING || + field->type() == MYSQL_TYPE_VAR_STRING || + ((int) (pos->key_part[j].length - field->decimals())) >= 4)) + keydef[i].seg[j].flag|= HA_SPACE_PACK; + } + else if (j == 0 && (!(pos->flags & HA_NOSAME) || pos->key_length > 16)) + keydef[i].flag|= HA_BINARY_PACK_KEY; + } + keydef[i].seg[j].type= (int) type; + keydef[i].seg[j].start= pos->key_part[j].offset; + keydef[i].seg[j].length= pos->key_part[j].length; + keydef[i].seg[j].bit_start= keydef[i].seg[j].bit_end= + keydef[i].seg[j].bit_length= 0; + keydef[i].seg[j].bit_pos= 0; + keydef[i].seg[j].language= field->charset()->number; + + if (field->null_ptr) + { + keydef[i].seg[j].null_bit= field->null_bit; + keydef[i].seg[j].null_pos= (uint) (field->null_ptr- + (uchar*) table_arg->record[0]); + } + else + { + keydef[i].seg[j].null_bit= 0; + keydef[i].seg[j].null_pos= 0; + } + if (field->type() == MYSQL_TYPE_BLOB || + field->type() == MYSQL_TYPE_GEOMETRY) + { + keydef[i].seg[j].flag|= HA_BLOB_PART; + /* save number of bytes used to pack length */ + keydef[i].seg[j].bit_start= (uint) (field->pack_length() - + share->blob_ptr_size); + } + else if (field->type() == MYSQL_TYPE_BIT) + { + keydef[i].seg[j].bit_length= ((Field_bit *) field)->bit_len; + keydef[i].seg[j].bit_start= ((Field_bit *) field)->bit_ofs; + keydef[i].seg[j].bit_pos= (uint) (((Field_bit *) field)->bit_ptr - + (uchar*) table_arg->record[0]); + } + } + keyseg+= pos->key_parts; + } + if (table_arg->found_next_number_field) + keydef[share->next_number_index].flag|= HA_AUTO_KEY; + record= table_arg->record[0]; + recpos= 0; + recinfo_pos= recinfo; + while (recpos < (uint) share->reclength) + { + Field **field, *found= 0; + minpos= share->reclength; + length= 0; + + for (field= table_arg->field; *field; field++) + { + if ((fieldpos= (*field)->offset(record)) >= recpos && + fieldpos <= minpos) + { + /* skip null fields */ + if (!(temp_length= (*field)->pack_length_in_rec())) + continue; /* Skip null-fields */ + if (! found || fieldpos < minpos || + (fieldpos == minpos && temp_length < length)) + { + minpos= fieldpos; + found= *field; + length= temp_length; + } + } + } + DBUG_PRINT("loop", ("found: 0x%lx recpos: %d minpos: %d length: %d", + (long) found, recpos, minpos, length)); + if (recpos != minpos) + { // Reserved space (Null bits?) 
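/*
  Note on the branch below (not part of the patch): byte ranges in
  record[0] that no Field covers -- the null-bit bytes at the front of
  the row and any padding -- are described to MyISAM as anonymous
  fixed-length FIELD_NORMAL columns, so the MI_COLUMNDEF offsets stay
  in step with the server's row layout.
*/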
+ bzero((char*) recinfo_pos, sizeof(*recinfo_pos)); + recinfo_pos->type= (int) FIELD_NORMAL; + recinfo_pos++->length= (uint16) (minpos - recpos); + } + if (!found) + break; + + if (found->flags & BLOB_FLAG) + recinfo_pos->type= (int) FIELD_BLOB; + else if (found->type() == MYSQL_TYPE_VARCHAR) + recinfo_pos->type= FIELD_VARCHAR; + else if (!(options & HA_OPTION_PACK_RECORD)) + recinfo_pos->type= (int) FIELD_NORMAL; + else if (found->zero_pack()) + recinfo_pos->type= (int) FIELD_SKIP_ZERO; + else + recinfo_pos->type= (int) ((length <= 3 || + (found->flags & ZEROFILL_FLAG)) ? + FIELD_NORMAL : + found->type() == MYSQL_TYPE_STRING || + found->type() == MYSQL_TYPE_VAR_STRING ? + FIELD_SKIP_ENDSPACE : + FIELD_SKIP_PRESPACE); + if (found->null_ptr) + { + recinfo_pos->null_bit= found->null_bit; + recinfo_pos->null_pos= (uint) (found->null_ptr - + (uchar*) table_arg->record[0]); + } + else + { + recinfo_pos->null_bit= 0; + recinfo_pos->null_pos= 0; + } + (recinfo_pos++)->length= (uint16) length; + recpos= minpos + length; + DBUG_PRINT("loop", ("length: %d type: %d", + recinfo_pos[-1].length,recinfo_pos[-1].type)); + } + *records_out= (uint) (recinfo_pos - recinfo); + DBUG_RETURN(0); +} + + +/* + Check for underlying table conformance + + SYNOPSIS + check_definition() + t1_keyinfo in First table key definition + t1_recinfo in First table record definition + t1_keys in Number of keys in first table + t1_recs in Number of records in first table + t2_keyinfo in Second table key definition + t2_recinfo in Second table record definition + t2_keys in Number of keys in second table + t2_recs in Number of records in second table + strict in Strict check switch + + DESCRIPTION + This function compares two MyISAM definitions. It was written primarily + to compare a merge table definition against the underlying table + definitions. It may also be used to compare the dot-frm and MYI + definitions of a MyISAM table, as well as to compare two different + MyISAM table definitions. + + For a merge table it is not required that the number of keys in the + merge table exactly matches the number of keys in the underlying table. + When calling this function for an underlying table conformance check, + the 'strict' flag must be set to false, and the converted merge + definition must be passed as t1_*. + + Otherwise the 'strict' flag must be set to true, and it is not required + to pass the converted dot-frm definition as t1_*. + + RETURN VALUE + 0 - Equal definitions. + 1 - Different definitions. + + TODO + - compare FULLTEXT keys; + - compare SPATIAL keys; + - compare FIELD_SKIP_ZERO which is converted to FIELD_NORMAL correctly + (should be correctly detected in table2myisam). +*/ + +int check_definition(MI_KEYDEF *t1_keyinfo, MI_COLUMNDEF *t1_recinfo, + uint t1_keys, uint t1_recs, + MI_KEYDEF *t2_keyinfo, MI_COLUMNDEF *t2_recinfo, + uint t2_keys, uint t2_recs, bool strict) +{ + uint i, j; + DBUG_ENTER("check_definition"); + if ((strict ?
t1_keys != t2_keys : t1_keys > t2_keys)) + { + DBUG_PRINT("error", ("Number of keys differs: t1_keys=%u, t2_keys=%u", + t1_keys, t2_keys)); + DBUG_RETURN(1); + } + if (t1_recs != t2_recs) + { + DBUG_PRINT("error", ("Number of recs differs: t1_recs=%u, t2_recs=%u", + t1_recs, t2_recs)); + DBUG_RETURN(1); + } + for (i= 0; i < t1_keys; i++) + { + HA_KEYSEG *t1_keysegs= t1_keyinfo[i].seg; + HA_KEYSEG *t2_keysegs= t2_keyinfo[i].seg; + if (t1_keyinfo[i].flag & HA_FULLTEXT && t2_keyinfo[i].flag & HA_FULLTEXT) + continue; + else if (t1_keyinfo[i].flag & HA_FULLTEXT || + t2_keyinfo[i].flag & HA_FULLTEXT) + { + DBUG_PRINT("error", ("Key %d has different definition", i)); + DBUG_PRINT("error", ("t1_fulltext= %d, t2_fulltext=%d", + test(t1_keyinfo[i].flag & HA_FULLTEXT), + test(t2_keyinfo[i].flag & HA_FULLTEXT))); + DBUG_RETURN(1); + } + if (t1_keyinfo[i].flag & HA_SPATIAL && t2_keyinfo[i].flag & HA_SPATIAL) + continue; + else if (t1_keyinfo[i].flag & HA_SPATIAL || + t2_keyinfo[i].flag & HA_SPATIAL) + { + DBUG_PRINT("error", ("Key %d has different definition", i)); + DBUG_PRINT("error", ("t1_spatial= %d, t2_spatial=%d", + test(t1_keyinfo[i].flag & HA_SPATIAL), + test(t2_keyinfo[i].flag & HA_SPATIAL))); + DBUG_RETURN(1); + } + if (t1_keyinfo[i].keysegs != t2_keyinfo[i].keysegs || + t1_keyinfo[i].key_alg != t2_keyinfo[i].key_alg) + { + DBUG_PRINT("error", ("Key %d has different definition", i)); + DBUG_PRINT("error", ("t1_keysegs=%d, t1_key_alg=%d", + t1_keyinfo[i].keysegs, t1_keyinfo[i].key_alg)); + DBUG_PRINT("error", ("t2_keysegs=%d, t2_key_alg=%d", + t2_keyinfo[i].keysegs, t2_keyinfo[i].key_alg)); + DBUG_RETURN(1); + } + for (j= t1_keyinfo[i].keysegs; j--;) + { + if (t1_keysegs[j].type != t2_keysegs[j].type || + t1_keysegs[j].language != t2_keysegs[j].language || + t1_keysegs[j].null_bit != t2_keysegs[j].null_bit || + t1_keysegs[j].length != t2_keysegs[j].length) + { + DBUG_PRINT("error", ("Key segment %d (key %d) has different " + "definition", j, i)); + DBUG_PRINT("error", ("t1_type=%d, t1_language=%d, t1_null_bit=%d, " + "t1_length=%d", + t1_keysegs[j].type, t1_keysegs[j].language, + t1_keysegs[j].null_bit, t1_keysegs[j].length)); + DBUG_PRINT("error", ("t2_type=%d, t2_language=%d, t2_null_bit=%d, " + "t2_length=%d", + t2_keysegs[j].type, t2_keysegs[j].language, + t2_keysegs[j].null_bit, t2_keysegs[j].length)); + + DBUG_RETURN(1); + } + } + } + for (i= 0; i < t1_recs; i++) + { + MI_COLUMNDEF *t1_rec= &t1_recinfo[i]; + MI_COLUMNDEF *t2_rec= &t2_recinfo[i]; + /* + FIELD_SKIP_ZERO can be changed to FIELD_NORMAL in mi_create, + see NOTE1 in mi_create.c + */ + if ((t1_rec->type != t2_rec->type && + !(t1_rec->type == (int) FIELD_SKIP_ZERO && + t1_rec->length == 1 && + t2_rec->type == (int) FIELD_NORMAL)) || + t1_rec->length != t2_rec->length || + t1_rec->null_bit != t2_rec->null_bit) + { + DBUG_PRINT("error", ("Field %d has different definition", i)); + DBUG_PRINT("error", ("t1_type=%d, t1_length=%d, t1_null_bit=%d", + t1_rec->type, t1_rec->length, t1_rec->null_bit)); + DBUG_PRINT("error", ("t2_type=%d, t2_length=%d, t2_null_bit=%d", + t2_rec->type, t2_rec->length, t2_rec->null_bit)); + DBUG_RETURN(1); + } + } + DBUG_RETURN(0); +} + + extern "C" { volatile int *killed_ptr(MI_CHECK *param) @@ -137,6 +482,7 @@ void mi_check_print_warning(MI_CHECK *param, const char *fmt,...) 
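For orientation, a condensed sketch (not part of the patch) of how the two helpers above are meant to compose for the strict dot-frm vs. MYI check; it mirrors the call pattern that ha_myisam::open() adopts later in this diff. The wrapper name myisam_matches_frm is invented for illustration, and the usual MyISAM headers are assumed:

  static int myisam_matches_frm(TABLE *table_arg, MI_INFO *file)
  {
    MI_KEYDEF *keydef;
    MI_COLUMNDEF *recinfo;
    uint records;
    int error;
    /* convert the server's TABLE definition into MyISAM terms */
    if ((error= table2myisam(table_arg, &keydef, &recinfo, &records)))
      return error;                          /* HA_ERR_OUT_OF_MEM */
    /* strict comparison against the definition in the open MYI file */
    error= check_definition(keydef, recinfo, table_arg->s->keys, records,
                            file->s->keyinfo, file->s->rec,
                            file->s->base.keys, file->s->base.fields, true);
    /* keydef and recinfo live in one my_multi_malloc() block */
    my_free((uchar*) recinfo, MYF(0));
    return error ? HA_ERR_CRASHED : 0;
  }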
ha_myisam::ha_myisam(handlerton *hton, TABLE_SHARE *table_arg) :handler(hton, table_arg), file(0), int_table_flags(HA_NULL_IN_KEY | HA_CAN_FULLTEXT | HA_CAN_SQL_HANDLER | + HA_BINLOG_ROW_CAPABLE | HA_BINLOG_STMT_CAPABLE | HA_DUPLICATE_POS | HA_CAN_INDEX_BLOBS | HA_AUTO_PART_KEY | HA_FILE_BASED | HA_CAN_GEOMETRY | HA_NO_TRANSACTIONS | HA_CAN_INSERT_DELAYED | HA_CAN_BIT_FIELD | HA_CAN_RTREEKEYS | @@ -194,7 +540,7 @@ int ha_myisam::net_read_dump(NET* net) error= -1; goto err; } - if (my_write(data_fd, (byte*)net->read_pos, (uint) packet_len, + if (my_write(data_fd, (uchar*)net->read_pos, (uint) packet_len, MYF(MY_WME|MY_FNABP))) { error = errno; @@ -213,7 +559,7 @@ int ha_myisam::dump(THD* thd, int fd) uint blocksize = share->blocksize; my_off_t bytes_to_read = share->state.state.data_file_length; int data_fd = file->dfile; - byte * buf = (byte*) my_malloc(blocksize, MYF(MY_WME)); + uchar *buf = (uchar*) my_malloc(blocksize, MYF(MY_WME)); if (!buf) return ENOMEM; @@ -221,7 +567,7 @@ int ha_myisam::dump(THD* thd, int fd) my_seek(data_fd, 0L, MY_SEEK_SET, MYF(MY_WME)); for (; bytes_to_read > 0;) { - uint bytes = my_read(data_fd, buf, blocksize, MYF(MY_WME)); + size_t bytes = my_read(data_fd, buf, blocksize, MYF(MY_WME)); if (bytes == MY_FILE_ERROR) { error = errno; @@ -238,7 +584,7 @@ int ha_myisam::dump(THD* thd, int fd) } else { - if (my_net_write(net, (char*) buf, bytes)) + if (my_net_write(net, buf, bytes)) { error = errno ? errno : EPIPE; goto err; @@ -249,62 +595,70 @@ int ha_myisam::dump(THD* thd, int fd) if (fd < 0) { - if (my_net_write(net, "", 0)) + if (my_net_write(net, (uchar*) "", 0)) error = errno ? errno : EPIPE; net_flush(net); } err: - my_free((gptr) buf, MYF(0)); + my_free((uchar*) buf, MYF(0)); return error; } #endif /* HAVE_REPLICATION */ -bool ha_myisam::check_if_locking_is_allowed(uint sql_command, - ulong type, TABLE *table, - uint count, - bool called_by_privileged_thread) +/* Name is here without an extension */ +int ha_myisam::open(const char *name, int mode, uint test_if_locked) { - /* - To be able to open and lock for reading system tables like 'mysql.proc', - when we already have some tables opened and locked, and avoid deadlocks - we have to disallow write-locking of these tables with any other tables. - */ - if (table->s->system_table && - table->reginfo.lock_type >= TL_WRITE_ALLOW_WRITE && - count != 1) - { - my_error(ER_WRONG_LOCK_OF_SYSTEM_TABLE, MYF(0), table->s->db.str, - table->s->table_name.str); - return FALSE; - } + MI_KEYDEF *keyinfo; + MI_COLUMNDEF *recinfo= 0; + uint recs; + uint i; /* - Deny locking of the log tables, which is incompatible with - concurrent insert. Unless called from a logger THD (general_log_thd - or slow_log_thd) or by a privileged thread. + If the user wants to have memory mapped data files, add an + open_flag. Do not memory map temporary tables because they are + expected to be inserted into and thus extended a lot. Memory mapping is + efficient for files that keep their size, but very inefficient for + growing files. Using an open_flag instead of calling mi_extra(... + HA_EXTRA_MMAP ...) after mi_open() has the advantage that the + mapping is not repeated for every open, but just done on the initial + open, when the MyISAM share is created. Every time the server + needs to open a new instance of a table it calls this method. We + will always supply HA_OPEN_MMAP for a permanent table.
However, the + MyISAM storage engine will ignore this flag if this is a secondary + open of a table that is in use by other threads already (if the + MyISAM share exists already). */ - if (!called_by_privileged_thread) - return check_if_log_table_locking_is_allowed(sql_command, type, table); - - return TRUE; -} - - /* Name is here without an extension */ + if (!(test_if_locked & HA_OPEN_TMP_TABLE) && opt_myisam_use_mmap) + test_if_locked|= HA_OPEN_MMAP; -int ha_myisam::open(const char *name, int mode, uint test_if_locked) -{ - uint i; if (!(file=mi_open(name, mode, test_if_locked | HA_OPEN_FROM_SQL_LAYER))) return (my_errno ? my_errno : -1); + if (!table->s->tmp_table) /* No need to perform a check for tmp table */ + { + if ((my_errno= table2myisam(table, &keyinfo, &recinfo, &recs))) + { + /* purecov: begin inspected */ + DBUG_PRINT("error", ("Failed to convert TABLE object to MyISAM " + "key and column definition")); + goto err; + /* purecov: end */ + } + if (check_definition(keyinfo, recinfo, table->s->keys, recs, + file->s->keyinfo, file->s->rec, + file->s->base.keys, file->s->base.fields, true)) + { + /* purecov: begin inspected */ + my_errno= HA_ERR_CRASHED; + goto err; + /* purecov: end */ + } + } if (test_if_locked & (HA_OPEN_IGNORE_IF_LOCKED | HA_OPEN_TMP_TABLE)) VOID(mi_extra(file, HA_EXTRA_NO_WAIT_LOCK, 0)); - if (!(test_if_locked & HA_OPEN_TMP_TABLE) && opt_myisam_use_mmap) - VOID(mi_extra(file, HA_EXTRA_MMAP, 0)); - info(HA_STATUS_NO_LOCK | HA_STATUS_VARIABLE | HA_STATUS_CONST); if (!(test_if_locked & HA_OPEN_WAIT_IF_LOCKED)) VOID(mi_extra(file, HA_EXTRA_WAIT_LOCK, 0)); @@ -315,13 +669,24 @@ int ha_myisam::open(const char *name, int mode, uint test_if_locked) for (i= 0; i < table->s->keys; i++) { - struct st_plugin_int *parser= table->key_info[i].parser; + plugin_ref parser= table->key_info[i].parser; if (table->key_info[i].flags & HA_USES_PARSER) file->s->keyinfo[i].parser= - (struct st_mysql_ftparser *)parser->plugin->info; + (struct st_mysql_ftparser *)plugin_decl(parser)->info; table->key_info[i].block_size= file->s->keyinfo[i].block_length; } - return (0); + my_errno= 0; + goto end; + err: + this->close(); + end: + /* + Both recinfo and keydef are allocated by my_multi_malloc(), thus only + recinfo must be freed. 
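(my_multi_malloc() carves every requested piece out of a single
underlying malloc() block and returns successive pointers into it;
recinfo was the first pointer requested in table2myisam(), so freeing
it releases keydef and the HA_KEYSEG array as well.)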
+ */ + if (recinfo) + my_free((uchar*) recinfo, MYF(0)); + return my_errno; } int ha_myisam::close(void) @@ -331,9 +696,9 @@ int ha_myisam::close(void) return mi_close(tmp); } -int ha_myisam::write_row(byte * buf) +int ha_myisam::write_row(uchar *buf) { - statistic_increment(table->in_use->status_var.ha_write_count,&LOCK_status); + ha_statistic_increment(&SSV::ha_write_count); /* If we have a timestamp column, update it to the current time */ if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT) @@ -647,8 +1012,8 @@ int ha_myisam::optimize(THD* thd, HA_CHECK_OPT *check_opt) param.sort_buffer_length= check_opt->sort_buffer_size; if ((error= repair(thd,param,1)) && param.retry_repair) { - sql_print_warning("Warning: Optimize table got errno %d, retrying", - my_errno); + sql_print_warning("Warning: Optimize table got errno %d on %s.%s, retrying", + my_errno, param.db_name, param.table_name); param.testflag&= ~T_REP_BY_SORT; error= repair(thd,param,1); } @@ -656,17 +1021,33 @@ int ha_myisam::optimize(THD* thd, HA_CHECK_OPT *check_opt) } -int ha_myisam::repair(THD *thd, MI_CHECK ¶m, bool optimize) +int ha_myisam::repair(THD *thd, MI_CHECK ¶m, bool do_optimize) { int error=0; uint local_testflag=param.testflag; - bool optimize_done= !optimize, statistics_done=0; + bool optimize_done= !do_optimize, statistics_done=0; const char *old_proc_info=thd->proc_info; char fixed_name[FN_REFLEN]; MYISAM_SHARE* share = file->s; ha_rows rows= file->state->records; DBUG_ENTER("ha_myisam::repair"); + /* + Normally this method is entered with a properly opened table. If the + repair fails, it can be repeated with more elaborate options. Under + special circumstances it can happen that a repair fails so that it + closed the data file and cannot re-open it. In this case file->dfile + is set to -1. We must not try another repair without an open data + file. (Bug #25289) + */ + if (file->dfile == -1) + { + sql_print_information("Retrying repair of: '%s' failed. 
" + "Please try REPAIR EXTENDED or myisamchk", + table->s->path.str); + DBUG_RETURN(HA_ADMIN_FAILED); + } + param.db_name= table->s->db.str; param.table_name= table->alias; param.tmpfile_createflag = O_RDWR | O_TRUNC; @@ -684,7 +1065,7 @@ int ha_myisam::repair(THD *thd, MI_CHECK ¶m, bool optimize) DBUG_RETURN(HA_ADMIN_FAILED); } - if (!optimize || + if (!do_optimize || ((file->state->del || share->state.split != file->state->records) && (!(param.testflag & T_QUICK) || !(share->state.changed & STATE_NOT_OPTIMIZED_KEYS)))) @@ -803,23 +1184,18 @@ int ha_myisam::assign_to_keycache(THD* thd, HA_CHECK_OPT *check_opt) KEY_CACHE *new_key_cache= check_opt->key_cache; const char *errmsg= 0; int error= HA_ADMIN_OK; - ulonglong map= ~(ulonglong) 0; + ulonglong map; TABLE_LIST *table_list= table->pos_in_table_list; DBUG_ENTER("ha_myisam::assign_to_keycache"); - /* Check validity of the index references */ - if (table_list->use_index) - { - /* We only come here when the user did specify an index map */ - key_map kmap; - if (get_key_map_from_key_list(&kmap, table, table_list->use_index)) - { - errmsg= thd->net.last_error; - error= HA_ADMIN_FAILED; - goto err; - } - map= kmap.to_ulonglong(); - } + table->keys_in_use_for_query.clear_all(); + + if (table_list->process_index_hints(table)) + DBUG_RETURN(HA_ADMIN_FAILED); + map= ~(ulonglong) 0; + if (!table->keys_in_use_for_query.is_clear_all()) + /* use all keys if there's no list specified by the user through hints */ + map= table->keys_in_use_for_query.to_ulonglong(); if ((error= mi_assign_to_key_cache(file, map, new_key_cache))) { @@ -830,7 +1206,6 @@ int ha_myisam::assign_to_keycache(THD* thd, HA_CHECK_OPT *check_opt) error= HA_ADMIN_CORRUPT; } - err: if (error != HA_ADMIN_OK) { /* Send error to user */ @@ -855,26 +1230,23 @@ int ha_myisam::preload_keys(THD* thd, HA_CHECK_OPT *check_opt) { int error; const char *errmsg; - ulonglong map= ~(ulonglong) 0; + ulonglong map; TABLE_LIST *table_list= table->pos_in_table_list; my_bool ignore_leaves= table_list->ignore_leaves; + char buf[ERRMSGSIZE+20]; DBUG_ENTER("ha_myisam::preload_keys"); + table->keys_in_use_for_query.clear_all(); + + if (table_list->process_index_hints(table)) + DBUG_RETURN(HA_ADMIN_FAILED); + + map= ~(ulonglong) 0; /* Check validity of the index references */ - if (table_list->use_index) - { - key_map kmap; - get_key_map_from_key_list(&kmap, table, table_list->use_index); - if (kmap.is_set_all()) - { - errmsg= thd->net.last_error; - error= HA_ADMIN_FAILED; - goto err; - } - if (!kmap.is_clear_all()) - map= kmap.to_ulonglong(); - } + if (!table->keys_in_use_for_query.is_clear_all()) + /* use all keys if there's no list specified by the user through hints */ + map= table->keys_in_use_for_query.to_ulonglong(); mi_extra(file, HA_EXTRA_PRELOAD_BUFFER_SIZE, (void *) &thd->variables.preload_buff_size); @@ -889,7 +1261,6 @@ int ha_myisam::preload_keys(THD* thd, HA_CHECK_OPT *check_opt) errmsg= "Failed to allocate buffer"; break; default: - char buf[ERRMSGSIZE+20]; my_snprintf(buf, ERRMSGSIZE, "Failed to read from index file (errno: %d)", my_errno); errmsg= buf; @@ -1022,8 +1393,8 @@ int ha_myisam::enable_indexes(uint mode) param.tmpdir=&mysql_tmpdir_list; if ((error= (repair(thd,param,0) != HA_ADMIN_OK)) && param.retry_repair) { - sql_print_warning("Warning: Enabling keys got errno %d, retrying", - my_errno); + sql_print_warning("Warning: Enabling keys got errno %d on %s.%s, retrying", + my_errno, param.db_name, param.table_name); /* Repairing by sort failed. Now try standard repair method. 
*/ param.testflag&= ~(T_REP_BY_SORT | T_QUICK); error= (repair(thd,param,0) != HA_ADMIN_OK); @@ -1032,8 +1403,10 @@ int ha_myisam::enable_indexes(uint mode) might have been set by the first repair. They can still be seen with SHOW WARNINGS then. */ +#ifndef EMBEDDED_LIBRARY if (! error) thd->clear_error(); +#endif /* EMBEDDED_LIBRARY */ } info(HA_STATUS_CONST); thd->proc_info=save_proc_info; @@ -1189,99 +1562,95 @@ bool ha_myisam::is_crashed() const (my_disable_locking && file->s->state.open_count)); } -int ha_myisam::update_row(const byte * old_data, byte * new_data) +int ha_myisam::update_row(const uchar *old_data, uchar *new_data) { - statistic_increment(table->in_use->status_var.ha_update_count,&LOCK_status); + ha_statistic_increment(&SSV::ha_update_count); if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE) table->timestamp_field->set_time(); return mi_update(file,old_data,new_data); } -int ha_myisam::delete_row(const byte * buf) +int ha_myisam::delete_row(const uchar *buf) { - statistic_increment(table->in_use->status_var.ha_delete_count,&LOCK_status); + ha_statistic_increment(&SSV::ha_delete_count); return mi_delete(file,buf); } -int ha_myisam::index_read(byte * buf, const byte * key, - uint key_len, enum ha_rkey_function find_flag) +int ha_myisam::index_read_map(uchar *buf, const uchar *key, + key_part_map keypart_map, + enum ha_rkey_function find_flag) { DBUG_ASSERT(inited==INDEX); - statistic_increment(table->in_use->status_var.ha_read_key_count, - &LOCK_status); - int error=mi_rkey(file,buf,active_index, key, key_len, find_flag); + ha_statistic_increment(&SSV::ha_read_key_count); + int error=mi_rkey(file, buf, active_index, key, keypart_map, find_flag); table->status=error ? STATUS_NOT_FOUND: 0; return error; } -int ha_myisam::index_read_idx(byte * buf, uint index, const byte * key, - uint key_len, enum ha_rkey_function find_flag) +int ha_myisam::index_read_idx_map(uchar *buf, uint index, const uchar *key, + key_part_map keypart_map, + enum ha_rkey_function find_flag) { - statistic_increment(table->in_use->status_var.ha_read_key_count, - &LOCK_status); - int error=mi_rkey(file,buf,index, key, key_len, find_flag); + ha_statistic_increment(&SSV::ha_read_key_count); + int error=mi_rkey(file, buf, index, key, keypart_map, find_flag); table->status=error ? STATUS_NOT_FOUND: 0; return error; } -int ha_myisam::index_read_last(byte * buf, const byte * key, uint key_len) +int ha_myisam::index_read_last_map(uchar *buf, const uchar *key, + key_part_map keypart_map) { DBUG_ENTER("ha_myisam::index_read_last"); DBUG_ASSERT(inited==INDEX); - statistic_increment(table->in_use->status_var.ha_read_key_count, - &LOCK_status); - int error=mi_rkey(file,buf,active_index, key, key_len, HA_READ_PREFIX_LAST); + ha_statistic_increment(&SSV::ha_read_key_count); + int error=mi_rkey(file, buf, active_index, key, keypart_map, + HA_READ_PREFIX_LAST); table->status=error ? STATUS_NOT_FOUND: 0; DBUG_RETURN(error); } -int ha_myisam::index_next(byte * buf) +int ha_myisam::index_next(uchar *buf) { DBUG_ASSERT(inited==INDEX); - statistic_increment(table->in_use->status_var.ha_read_next_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_next_count); int error=mi_rnext(file,buf,active_index); table->status=error ? 
STATUS_NOT_FOUND: 0; return error; } -int ha_myisam::index_prev(byte * buf) +int ha_myisam::index_prev(uchar *buf) { DBUG_ASSERT(inited==INDEX); - statistic_increment(table->in_use->status_var.ha_read_prev_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_prev_count); int error=mi_rprev(file,buf, active_index); table->status=error ? STATUS_NOT_FOUND: 0; return error; } -int ha_myisam::index_first(byte * buf) +int ha_myisam::index_first(uchar *buf) { DBUG_ASSERT(inited==INDEX); - statistic_increment(table->in_use->status_var.ha_read_first_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_first_count); int error=mi_rfirst(file, buf, active_index); table->status=error ? STATUS_NOT_FOUND: 0; return error; } -int ha_myisam::index_last(byte * buf) +int ha_myisam::index_last(uchar *buf) { DBUG_ASSERT(inited==INDEX); - statistic_increment(table->in_use->status_var.ha_read_last_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_last_count); int error=mi_rlast(file, buf, active_index); table->status=error ? STATUS_NOT_FOUND: 0; return error; } -int ha_myisam::index_next_same(byte * buf, - const byte *key __attribute__((unused)), +int ha_myisam::index_next_same(uchar *buf, + const uchar *key __attribute__((unused)), uint length __attribute__((unused))) { DBUG_ASSERT(inited==INDEX); - statistic_increment(table->in_use->status_var.ha_read_next_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_next_count); int error=mi_rnext_same(file,buf); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -1295,71 +1664,69 @@ int ha_myisam::rnd_init(bool scan) return mi_reset(file); // Free buffers } -int ha_myisam::rnd_next(byte *buf) +int ha_myisam::rnd_next(uchar *buf) { - statistic_increment(table->in_use->status_var.ha_read_rnd_next_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_rnd_next_count); int error=mi_scan(file, buf); table->status=error ? STATUS_NOT_FOUND: 0; return error; } -int ha_myisam::restart_rnd_next(byte *buf, byte *pos) +int ha_myisam::restart_rnd_next(uchar *buf, uchar *pos) { return rnd_pos(buf,pos); } -int ha_myisam::rnd_pos(byte * buf, byte *pos) +int ha_myisam::rnd_pos(uchar *buf, uchar *pos) { - statistic_increment(table->in_use->status_var.ha_read_rnd_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_rnd_count); int error=mi_rrnd(file, buf, my_get_ptr(pos,ref_length)); table->status=error ? STATUS_NOT_FOUND: 0; return error; } -void ha_myisam::position(const byte* record) +void ha_myisam::position(const uchar *record) { - my_off_t position=mi_position(file); - my_store_ptr(ref, ref_length, position); + my_off_t row_position= mi_position(file); + my_store_ptr(ref, ref_length, row_position); } int ha_myisam::info(uint flag) { - MI_ISAMINFO info; + MI_ISAMINFO misam_info; char name_buff[FN_REFLEN]; - (void) mi_status(file,&info,flag); + (void) mi_status(file,&misam_info,flag); if (flag & HA_STATUS_VARIABLE) { - stats.records = info.records; - stats.deleted = info.deleted; - stats.data_file_length=info.data_file_length; - stats.index_file_length=info.index_file_length; - stats.delete_length = info.delete_length; - stats.check_time = info.check_time; - stats. 
mean_rec_length=info.mean_reclength; + stats.records= misam_info.records; + stats.deleted= misam_info.deleted; + stats.data_file_length= misam_info.data_file_length; + stats.index_file_length= misam_info.index_file_length; + stats.delete_length= misam_info.delete_length; + stats.check_time= misam_info.check_time; + stats.mean_rec_length= misam_info.mean_reclength; } if (flag & HA_STATUS_CONST) { TABLE_SHARE *share= table->s; - stats.max_data_file_length= info.max_data_file_length; - stats.max_index_file_length= info.max_index_file_length; - stats.create_time= info.create_time; - ref_length= info.reflength; - share->db_options_in_use= info.options; + stats.max_data_file_length= misam_info.max_data_file_length; + stats.max_index_file_length= misam_info.max_index_file_length; + stats.create_time= misam_info.create_time; + ref_length= misam_info.reflength; + share->db_options_in_use= misam_info.options; stats.block_size= myisam_block_size; /* record block size */ /* Update share */ if (share->tmp_table == NO_TMP_TABLE) pthread_mutex_lock(&share->mutex); share->keys_in_use.set_prefix(share->keys); - share->keys_in_use.intersect_extended(info.key_map); + share->keys_in_use.intersect_extended(misam_info.key_map); share->keys_for_keyread.intersect(share->keys_in_use); - share->db_record_offset= info.record_offset; + share->db_record_offset= misam_info.record_offset; if (share->key_parts) memcpy((char*) table->key_info[0].rec_per_key, - (char*) info.rec_per_key, + (char*) misam_info.rec_per_key, sizeof(table->key_info[0].rec_per_key)*share->key_parts); if (share->tmp_table == NO_TMP_TABLE) pthread_mutex_unlock(&share->mutex); @@ -1371,22 +1738,22 @@ int ha_myisam::info(uint flag) data_file_name= index_file_name= 0; fn_format(name_buff, file->filename, "", MI_NAME_DEXT, MY_APPEND_EXT | MY_UNPACK_FILENAME); - if (strcmp(name_buff, info.data_file_name)) - data_file_name=info.data_file_name; + if (strcmp(name_buff, misam_info.data_file_name)) + data_file_name=misam_info.data_file_name; fn_format(name_buff, file->filename, "", MI_NAME_IEXT, MY_APPEND_EXT | MY_UNPACK_FILENAME); - if (strcmp(name_buff, info.index_file_name)) - index_file_name=info.index_file_name; + if (strcmp(name_buff, misam_info.index_file_name)) + index_file_name=misam_info.index_file_name; } if (flag & HA_STATUS_ERRKEY) { - errkey = info.errkey; - my_store_ptr(dup_ref, ref_length, info.dupp_key_pos); + errkey = misam_info.errkey; + my_store_ptr(dup_ref, ref_length, misam_info.dupp_key_pos); } if (flag & HA_STATUS_TIME) - stats.update_time = info.update_time; + stats.update_time = misam_info.update_time; if (flag & HA_STATUS_AUTO) - stats.auto_increment_value= info.auto_increment; + stats.auto_increment_value= misam_info.auto_increment; return 0; } @@ -1454,200 +1821,43 @@ void ha_myisam::update_create_info(HA_CREATE_INFO *create_info) int ha_myisam::create(const char *name, register TABLE *table_arg, - HA_CREATE_INFO *info) + HA_CREATE_INFO *ha_create_info) { int error; - uint i,j,recpos,minpos,fieldpos,temp_length,length, create_flags= 0; - bool found_real_auto_increment=0; - enum ha_base_keytype type; + uint create_flags= 0, records, i; char buff[FN_REFLEN]; - byte *record; - KEY *pos; MI_KEYDEF *keydef; - MI_COLUMNDEF *recinfo,*recinfo_pos; - HA_KEYSEG *keyseg; + MI_COLUMNDEF *recinfo; + MI_CREATE_INFO create_info; TABLE_SHARE *share= table_arg->s; uint options= share->db_options_in_use; DBUG_ENTER("ha_myisam::create"); - - type=HA_KEYTYPE_BINARY; // Keep compiler happy - if (!(my_multi_malloc(MYF(MY_WME), - 
&recinfo,(share->fields*2+2)* - sizeof(MI_COLUMNDEF), - &keydef, share->keys*sizeof(MI_KEYDEF), - &keyseg, - ((share->key_parts + share->keys) * - sizeof(HA_KEYSEG)), - NullS))) - DBUG_RETURN(HA_ERR_OUT_OF_MEM); - - pos=table_arg->key_info; - for (i=0; i < share->keys ; i++, pos++) + for (i= 0; i < share->keys; i++) { - if (pos->flags & HA_USES_PARSER) - create_flags|= HA_CREATE_RELIES_ON_SQL_LAYER; - keydef[i].flag= (pos->flags & (HA_NOSAME | HA_FULLTEXT | HA_SPATIAL)); - keydef[i].key_alg= pos->algorithm == HA_KEY_ALG_UNDEF ? - (pos->flags & HA_SPATIAL ? HA_KEY_ALG_RTREE : HA_KEY_ALG_BTREE) : - pos->algorithm; - keydef[i].block_length= pos->block_size; - - keydef[i].seg=keyseg; - keydef[i].keysegs=pos->key_parts; - for (j=0 ; j < pos->key_parts ; j++) + if (table_arg->key_info[i].flags & HA_USES_PARSER) { - Field *field=pos->key_part[j].field; - type=field->key_type(); - keydef[i].seg[j].flag=pos->key_part[j].key_part_flag; - - if (options & HA_OPTION_PACK_KEYS || - (pos->flags & (HA_PACK_KEY | HA_BINARY_PACK_KEY | - HA_SPACE_PACK_USED))) - { - if (pos->key_part[j].length > 8 && - (type == HA_KEYTYPE_TEXT || - type == HA_KEYTYPE_NUM || - (type == HA_KEYTYPE_BINARY && !field->zero_pack()))) - { - /* No blobs here */ - if (j == 0) - keydef[i].flag|=HA_PACK_KEY; - if (!(field->flags & ZEROFILL_FLAG) && - (field->type() == MYSQL_TYPE_STRING || - field->type() == MYSQL_TYPE_VAR_STRING || - ((int) (pos->key_part[j].length - field->decimals())) - >= 4)) - keydef[i].seg[j].flag|=HA_SPACE_PACK; - } - else if (j == 0 && (!(pos->flags & HA_NOSAME) || pos->key_length > 16)) - keydef[i].flag|= HA_BINARY_PACK_KEY; - } - keydef[i].seg[j].type= (int) type; - keydef[i].seg[j].start= pos->key_part[j].offset; - keydef[i].seg[j].length= pos->key_part[j].length; - keydef[i].seg[j].bit_start= keydef[i].seg[j].bit_end= - keydef[i].seg[j].bit_length= 0; - keydef[i].seg[j].bit_pos= 0; - keydef[i].seg[j].language= field->charset()->number; - - if (field->null_ptr) - { - keydef[i].seg[j].null_bit=field->null_bit; - keydef[i].seg[j].null_pos= (uint) (field->null_ptr- - (uchar*) table_arg->record[0]); - } - else - { - keydef[i].seg[j].null_bit=0; - keydef[i].seg[j].null_pos=0; - } - if (field->type() == MYSQL_TYPE_BLOB || - field->type() == MYSQL_TYPE_GEOMETRY) - { - keydef[i].seg[j].flag|=HA_BLOB_PART; - /* save number of bytes used to pack length */ - keydef[i].seg[j].bit_start= (uint) (field->pack_length() - - share->blob_ptr_size); - } - else if (field->type() == MYSQL_TYPE_BIT) - { - keydef[i].seg[j].bit_length= ((Field_bit *) field)->bit_len; - keydef[i].seg[j].bit_start= ((Field_bit *) field)->bit_ofs; - keydef[i].seg[j].bit_pos= (uint) (((Field_bit *) field)->bit_ptr - - (uchar*) table_arg->record[0]); - } - } - keyseg+=pos->key_parts; - } - - if (table_arg->found_next_number_field) - { - keydef[share->next_number_index].flag|= HA_AUTO_KEY; - found_real_auto_increment= share->next_number_key_offset == 0; - } - - record= table_arg->record[0]; - recpos=0; recinfo_pos=recinfo; - while (recpos < (uint) share->reclength) - { - Field **field,*found=0; - minpos= share->reclength; - length=0; - - for (field=table_arg->field ; *field ; field++) - { - if ((fieldpos=(*field)->offset(record)) >= recpos && - fieldpos <= minpos) - { - /* skip null fields */ - if (!(temp_length= (*field)->pack_length_in_rec())) - continue; /* Skip null-fields */ - if (! 
found || fieldpos < minpos || - (fieldpos == minpos && temp_length < length)) - { - minpos=fieldpos; found= *field; length=temp_length; - } - } - } - DBUG_PRINT("loop",("found: 0x%lx recpos: %d minpos: %d length: %d", - (long) found, recpos, minpos, length)); - if (recpos != minpos) - { // Reserved space (Null bits?) - bzero((char*) recinfo_pos,sizeof(*recinfo_pos)); - recinfo_pos->type=(int) FIELD_NORMAL; - recinfo_pos++->length= (uint16) (minpos-recpos); - } - if (! found) + create_flags|= HA_CREATE_RELIES_ON_SQL_LAYER; break; - - if (found->flags & BLOB_FLAG) - recinfo_pos->type= (int) FIELD_BLOB; - else if (found->type() == MYSQL_TYPE_VARCHAR) - recinfo_pos->type= FIELD_VARCHAR; - else if (!(options & HA_OPTION_PACK_RECORD)) - recinfo_pos->type= (int) FIELD_NORMAL; - else if (found->zero_pack()) - recinfo_pos->type= (int) FIELD_SKIP_ZERO; - else - recinfo_pos->type= (int) ((length <= 3 || - (found->flags & ZEROFILL_FLAG)) ? - FIELD_NORMAL : - found->type() == MYSQL_TYPE_STRING || - found->type() == MYSQL_TYPE_VAR_STRING ? - FIELD_SKIP_ENDSPACE : - FIELD_SKIP_PRESPACE); - if (found->null_ptr) - { - recinfo_pos->null_bit=found->null_bit; - recinfo_pos->null_pos= (uint) (found->null_ptr- - (uchar*) table_arg->record[0]); } - else - { - recinfo_pos->null_bit=0; - recinfo_pos->null_pos=0; - } - (recinfo_pos++)->length= (uint16) length; - recpos=minpos+length; - DBUG_PRINT("loop",("length: %d type: %d", - recinfo_pos[-1].length,recinfo_pos[-1].type)); - } - MI_CREATE_INFO create_info; - bzero((char*) &create_info,sizeof(create_info)); + if ((error= table2myisam(table_arg, &keydef, &recinfo, &records))) + DBUG_RETURN(error); /* purecov: inspected */ + bzero((char*) &create_info, sizeof(create_info)); create_info.max_rows= share->max_rows; create_info.reloc_rows= share->min_rows; - create_info.with_auto_increment=found_real_auto_increment; - create_info.auto_increment=(info->auto_increment_value ? - info->auto_increment_value -1 : - (ulonglong) 0); + create_info.with_auto_increment= share->next_number_key_offset == 0; + create_info.auto_increment= (ha_create_info->auto_increment_value ? 
+ ha_create_info->auto_increment_value -1 : + (ulonglong) 0); create_info.data_file_length= ((ulonglong) share->max_rows * - share->avg_row_length); - create_info.data_file_name= info->data_file_name; - create_info.index_file_name= info->index_file_name; + share->avg_row_length); + create_info.data_file_name= ha_create_info->data_file_name; + create_info.index_file_name= ha_create_info->index_file_name; - if (info->options & HA_LEX_CREATE_TMP_TABLE) + if (ha_create_info->options & HA_LEX_CREATE_TMP_TABLE) create_flags|= HA_CREATE_TMP_TABLE; + if (ha_create_info->options & HA_CREATE_KEEP_FILES) + create_flags|= HA_CREATE_KEEP_FILES; if (options & HA_OPTION_PACK_RECORD) create_flags|= HA_PACK_RECORD; if (options & HA_OPTION_CHECKSUM) @@ -1656,13 +1866,13 @@ int ha_myisam::create(const char *name, register TABLE *table_arg, create_flags|= HA_CREATE_DELAY_KEY_WRITE; /* TODO: Check that the following fn_format is really needed */ - error=mi_create(fn_format(buff,name,"","",MY_UNPACK_FILENAME|MY_APPEND_EXT), - share->keys,keydef, - (uint) (recinfo_pos-recinfo), recinfo, - 0, (MI_UNIQUEDEF*) 0, - &create_info, create_flags); - - my_free((gptr) recinfo,MYF(0)); + error= mi_create(fn_format(buff, name, "", "", + MY_UNPACK_FILENAME|MY_APPEND_EXT), + share->keys, keydef, + records, recinfo, + 0, (MI_UNIQUEDEF*) 0, + &create_info, create_flags); + my_free((uchar*) recinfo, MYF(0)); DBUG_RETURN(error); } @@ -1680,7 +1890,7 @@ void ha_myisam::get_auto_increment(ulonglong offset, ulonglong increment, { ulonglong nr; int error; - byte key[MI_MAX_KEY_LENGTH]; + uchar key[MI_MAX_KEY_LENGTH]; if (!table->s->next_number_key_offset) { // Autoincrement at key-start @@ -1698,8 +1908,9 @@ void ha_myisam::get_auto_increment(ulonglong offset, ulonglong increment, key_copy(key, table->record[0], table->key_info + table->s->next_number_index, table->s->next_number_key_offset); - error= mi_rkey(file,table->record[1],(int) table->s->next_number_index, - key,table->s->next_number_key_offset,HA_READ_PREFIX_LAST); + error= mi_rkey(file, table->record[1], (int) table->s->next_number_index, + key, make_prev_keypart_map(table->s->next_number_keypart), + HA_READ_PREFIX_LAST); if (error) nr= 1; else @@ -1752,7 +1963,7 @@ ha_rows ha_myisam::records_in_range(uint inx, key_range *min_key, } -int ha_myisam::ft_read(byte * buf) +int ha_myisam::ft_read(uchar *buf) { int error; @@ -1833,3 +2044,78 @@ mysql_declare_plugin(myisam) } mysql_declare_plugin_end; + +#ifdef HAVE_QUERY_CACHE +/** + @brief Register a named table with a callback function to the query cache. + + @param thd The thread handle + @param table_key A pointer to the table name in the table cache + @param key_length The length of the table name + @param[out] engine_callback The pointer to the storage engine callback + function, currently 0 + @param[out] engine_data Engine data will be set to 0. + + @note Despite the name of this function, it is used to check each statement + before it is cached and not to register a table or callback function. + + @see handler::register_query_cache_table + + @return Whether the statement may be cached. The engine_data and + engine_callback will be set to 0. + @retval TRUE It is ok to cache the statement + @retval FALSE The statement must not be cached +*/ + +my_bool ha_myisam::register_query_cache_table(THD *thd, char *table_name, + uint table_name_len, + qc_engine_callback + *engine_callback, + ulonglong *engine_data) +{ + /* + No callback function is needed to determine if a cached statement + is valid or not. + */ + *engine_callback= 0; + + /* + No engine data is needed.
*/ + *engine_data= 0; + + /* + If a concurrent INSERT has happened just before the currently processed + SELECT statement, the total size of the table is unknown. + + To determine if the table size is known, the current thread's snapshot of + the table size is compared with the actual table size. + + If the table size is unknown the SELECT statement can't be cached. + */ + ulonglong actual_data_file_length; + ulonglong current_data_file_length; + + /* + POSIX visibility rules specify that "2. Whatever memory values a + thread can see when it unlocks a mutex <...> can also be seen by any + thread that later locks the same mutex". In this particular case, the + concurrent insert thread had modified the data_file_length in + MYISAM_SHARE before it has unlocked (or even locked) + structure_guard_mutex. So, here we're guaranteed to see at least that + value after we've locked the same mutex. We can see a later value + (modified by some other thread) though, but that is ok, as we only want + to know whether the variable was changed; the actual new value doesn't matter + */ + actual_data_file_length= file->s->state.state.data_file_length; + current_data_file_length= file->save_state.data_file_length; + + if (current_data_file_length != actual_data_file_length) + { + /* Don't cache current statement. */ + return FALSE; + } + + /* It is ok to try to cache current statement. */ + return TRUE; +} +#endif diff --git a/storage/myisam/ha_myisam.h b/storage/myisam/ha_myisam.h index 882900bd35f..e8594fc9039 100644 --- a/storage/myisam/ha_myisam.h +++ b/storage/myisam/ha_myisam.h @@ -60,25 +60,22 @@ class ha_myisam: public handler uint max_supported_key_part_length() const { return MI_MAX_KEY_LENGTH; } uint checksum() const; - virtual bool check_if_locking_is_allowed(uint sql_command, - ulong type, TABLE *table, - uint count, - bool called_by_logger_thread); int open(const char *name, int mode, uint test_if_locked); int close(void); - int write_row(byte * buf); - int update_row(const byte * old_data, byte * new_data); - int delete_row(const byte * buf); - int index_read(byte * buf, const byte * key, - uint key_len, enum ha_rkey_function find_flag); - int index_read_idx(byte * buf, uint idx, const byte * key, - uint key_len, enum ha_rkey_function find_flag); - int index_read_last(byte * buf, const byte * key, uint key_len); - int index_next(byte * buf); - int index_prev(byte * buf); - int index_first(byte * buf); - int index_last(byte * buf); - int index_next_same(byte *buf, const byte *key, uint keylen); + int write_row(uchar * buf); + int update_row(const uchar * old_data, uchar * new_data); + int delete_row(const uchar * buf); + int index_read_map(uchar *buf, const uchar *key, key_part_map keypart_map, + enum ha_rkey_function find_flag); + int index_read_idx_map(uchar *buf, uint index, const uchar *key, + key_part_map keypart_map, + enum ha_rkey_function find_flag); + int index_read_last_map(uchar *buf, const uchar *key, key_part_map keypart_map); + int index_next(uchar * buf); + int index_prev(uchar * buf); + int index_first(uchar * buf); + int index_last(uchar * buf); + int index_next_same(uchar *buf, const uchar *key, uint keylen); int ft_init() { if (!ft_handler) @@ -89,15 +86,15 @@ class ha_myisam: public handler FT_INFO *ft_init_ext(uint flags, uint inx,String *key) { return ft_init_search(flags,file,inx, - (byte *)key->ptr(), key->length(), key->charset(), + (uchar *)key->ptr(), key->length(), key->charset(), table->record[0]); } - int ft_read(byte *buf); + int ft_read(uchar *buf); int rnd_init(bool
scan); - int rnd_next(byte *buf); - int rnd_pos(byte * buf, byte *pos); - int restart_rnd_next(byte *buf, byte *pos); - void position(const byte *record); + int rnd_next(uchar *buf); + int rnd_pos(uchar * buf, uchar *pos); + int restart_rnd_next(uchar *buf, uchar *pos); + void position(const uchar *record); int info(uint); int extra(enum ha_extra_function operation); int extra_opt(enum ha_extra_function operation, ulong cache_size); @@ -136,4 +133,11 @@ class ha_myisam: public handler int dump(THD* thd, int fd); int net_read_dump(NET* net); #endif +#ifdef HAVE_QUERY_CACHE + my_bool register_query_cache_table(THD *thd, char *table_key, + uint key_length, + qc_engine_callback + *engine_callback, + ulonglong *engine_data); +#endif }; diff --git a/storage/myisam/mi_cache.c b/storage/myisam/mi_cache.c index 59c9b2c8812..d6dcc431a8d 100644 --- a/storage/myisam/mi_cache.c +++ b/storage/myisam/mi_cache.c @@ -35,12 +35,12 @@ #include "myisamdef.h" -int _mi_read_cache(IO_CACHE *info, byte *buff, my_off_t pos, uint length, +int _mi_read_cache(IO_CACHE *info, uchar *buff, my_off_t pos, uint length, int flag) { uint read_length,in_buff_length; my_off_t offset; - char *in_buff_pos; + uchar *in_buff_pos; DBUG_ENTER("_mi_read_cache"); if (pos < info->pos_in_file) @@ -61,7 +61,7 @@ int _mi_read_cache(IO_CACHE *info, byte *buff, my_off_t pos, uint length, (my_off_t) (info->read_end - info->request_pos)) { in_buff_pos=info->request_pos+(uint) offset; - in_buff_length= min(length,(uint) (info->read_end-in_buff_pos)); + in_buff_length= min(length, (size_t) (info->read_end-in_buff_pos)); memcpy(buff,info->request_pos+(uint) offset,(size_t) in_buff_length); if (!(length-=in_buff_length)) DBUG_RETURN(0); diff --git a/storage/myisam/mi_check.c b/storage/myisam/mi_check.c index 7bcb8041fe0..fe6b716877c 100644 --- a/storage/myisam/mi_check.c +++ b/storage/myisam/mi_check.c @@ -83,12 +83,12 @@ static int sort_delete_record(MI_SORT_PARAM *sort_param); /*static int flush_pending_blocks(MI_CHECK *param);*/ static SORT_KEY_BLOCKS *alloc_key_blocks(MI_CHECK *param, uint blocks, uint buffer_length); -static ha_checksum mi_byte_checksum(const byte *buf, uint length); +static ha_checksum mi_byte_checksum(const uchar *buf, uint length); static void set_data_file_type(SORT_INFO *sort_info, MYISAM_SHARE *share); void myisamchk_init(MI_CHECK *param) { - bzero((gptr) param,sizeof(*param)); + bzero((uchar*) param,sizeof(*param)); param->opt_follow_links=1; param->keys_in_use= ~(ulonglong) 0; param->search_after_block=HA_OFFSET_ERROR; @@ -173,7 +173,7 @@ int chk_del(MI_CHECK *param, register MI_INFO *info, uint test_flag) printf(" %9s",llstr(next_link,buff)); if (next_link >= info->state->data_file_length) goto wrong; - if (my_pread(info->dfile,(char*) buff,delete_link_length, + if (my_pread(info->dfile, (uchar*) buff,delete_link_length, next_link,MYF(MY_NABP))) { if (test_flag & T_VERBOSE) puts(""); @@ -250,7 +250,8 @@ static int check_k_link(MI_CHECK *param, register MI_INFO *info, uint nr) my_off_t next_link; uint block_size=(nr+1)*MI_MIN_KEY_BLOCK_LENGTH; ha_rows records; - char llbuff[21], llbuff2[21], *buff; + char llbuff[21], llbuff2[21]; + uchar *buff; DBUG_ENTER("check_k_link"); DBUG_PRINT("enter", ("block_size: %u", block_size)); @@ -296,7 +297,7 @@ static int check_k_link(MI_CHECK *param, register MI_INFO *info, uint nr) */ if (!(buff=key_cache_read(info->s->key_cache, info->s->kfile, next_link, DFLT_INIT_HITS, - (byte*) info->buff, MI_MIN_KEY_BLOCK_LENGTH, + (uchar*) info->buff, MI_MIN_KEY_BLOCK_LENGTH, 
MI_MIN_KEY_BLOCK_LENGTH, 1))) { /* purecov: begin tested */ @@ -335,7 +336,7 @@ int chk_size(MI_CHECK *param, register MI_INFO *info) flush_key_blocks(info->s->key_cache, info->s->kfile, FLUSH_FORCE_WRITE); - size=my_seek(info->s->kfile,0L,MY_SEEK_END,MYF(0)); + size= my_seek(info->s->kfile, 0L, MY_SEEK_END, MYF(MY_THREADSAFE)); if ((skr=(my_off_t) info->state->key_file_length) != size) { /* Don't give error if file generated by myisampack */ @@ -531,8 +532,8 @@ int chk_key(MI_CHECK *param, register MI_INFO *info) /* Check that there isn't a row with auto_increment = 0 in the table */ mi_extra(info,HA_EXTRA_KEYREAD,0); bzero(info->lastkey,keyinfo->seg->length); - if (!mi_rkey(info, info->rec_buff, key, (const byte*) info->lastkey, - keyinfo->seg->length, HA_READ_KEY_EXACT)) + if (!mi_rkey(info, info->rec_buff, key, (const uchar*) info->lastkey, + (key_part_map)1, HA_READ_KEY_EXACT)) { /* Don't count this as a real warning, as myisamchk can't correct it */ uint save=param->warning_printed; @@ -595,7 +596,8 @@ static int chk_index_down(MI_CHECK *param, MI_INFO *info, MI_KEYDEF *keyinfo, { /* purecov: begin tested */ /* Give it a chance to fit in the real file size. */ - my_off_t max_length= my_seek(info->s->kfile, 0L, MY_SEEK_END, MYF(0)); + my_off_t max_length= my_seek(info->s->kfile, 0L, MY_SEEK_END, + MYF(MY_THREADSAFE)); mi_check_print_error(param, "Invalid key block position: %s " "key block size: %u file_length: %s", llstr(page, llbuff), keyinfo->block_length, @@ -740,7 +742,7 @@ static int chk_index(MI_CHECK *param, MI_INFO *info, MI_KEYDEF *keyinfo, char llbuff[22]; uint diff_pos[2]; DBUG_ENTER("chk_index"); - DBUG_DUMP("buff",(byte*) buff,mi_getint(buff)); + DBUG_DUMP("buff",(uchar*) buff,mi_getint(buff)); /* TODO: implement appropriate check for RTree keys */ if (keyinfo->flag & HA_SPATIAL) @@ -798,8 +800,8 @@ static int chk_index(MI_CHECK *param, MI_INFO *info, MI_KEYDEF *keyinfo, (flag=ha_key_cmp(keyinfo->seg,info->lastkey,key,key_length, comp_flag, diff_pos)) >=0) { - DBUG_DUMP("old",(byte*) info->lastkey, info->lastkey_length); - DBUG_DUMP("new",(byte*) key, key_length); + DBUG_DUMP("old",(uchar*) info->lastkey, info->lastkey_length); + DBUG_DUMP("new",(uchar*) key, key_length); DBUG_DUMP("new_in_page",(char*) old_keypos,(uint) (keypos-old_keypos)); if (comp_flag & SEARCH_FIND && flag == 0) @@ -831,7 +833,7 @@ static int chk_index(MI_CHECK *param, MI_INFO *info, MI_KEYDEF *keyinfo, key); } } - (*key_checksum)+= mi_byte_checksum((byte*) key, + (*key_checksum)+= mi_byte_checksum((uchar*) key, key_length- info->s->rec_reflength); record= _mi_dpos(info,0,key+key_length); if (keyinfo->flag & HA_FULLTEXT) /* special handling for ft2 */ @@ -869,7 +871,7 @@ static int chk_index(MI_CHECK *param, MI_INFO *info, MI_KEYDEF *keyinfo, DBUG_PRINT("test",("page: %s record: %s filelength: %s", llstr(page,llbuff),llstr(record,llbuff2), llstr(info->state->data_file_length,llbuff3))); - DBUG_DUMP("key",(byte*) key,key_length); + DBUG_DUMP("key",(uchar*) key,key_length); DBUG_DUMP("new_in_page",(char*) old_keypos,(uint) (keypos-old_keypos)); goto err; } @@ -881,10 +883,10 @@ static int chk_index(MI_CHECK *param, MI_INFO *info, MI_KEYDEF *keyinfo, llstr(page,llbuff), used_length, (keypos - buff)); goto err; } - my_afree((byte*) temp_buff); + my_afree((uchar*) temp_buff); DBUG_RETURN(0); err: - my_afree((byte*) temp_buff); + my_afree((uchar*) temp_buff); DBUG_RETURN(1); } /* chk_index */ @@ -939,7 +941,7 @@ int chk_data_link(MI_CHECK *param, MI_INFO *info,int extend) ha_rows records,del_blocks; 
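Editor's note: several hunks in this commit replace mi_rkey()'s byte-length argument with a key_part_map bitmask — (key_part_map)1 in chk_key() above, make_prev_keypart_map(...) in ha_myisam.cc. A minimal standalone C sketch of the presumed semantics; the typedef and macro below are assumptions inferred from the calls in the diff, not lines from this patch:

#include <stdio.h>

/* assumed: bit i set means key part i participates in the search key */
typedef unsigned long key_part_map;

/* assumed definition matching the make_prev_keypart_map() calls above */
#define make_prev_keypart_map(N) (((key_part_map)1 << (N)) - 1)

int main(void)
{
  /* (key_part_map)1 binds only the first key part, as in chk_key() */
  printf("1 part  -> 0x%lx\n", (unsigned long) make_prev_keypart_map(1)); /* 0x1 */
  /* a search on the first three parts of a multi-part key */
  printf("3 parts -> 0x%lx\n", (unsigned long) make_prev_keypart_map(3)); /* 0x7 */
  return 0;
}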
my_off_t used,empty,pos,splits,start_recpos, del_length,link_used,start_block; - byte *record,*to; + uchar *record,*to; char llbuff[22],llbuff2[22],llbuff3[22]; ha_checksum intern_record_checksum; ha_checksum key_checksum[MI_MAX_POSSIBLE_KEY]; @@ -956,7 +958,7 @@ int chk_data_link(MI_CHECK *param, MI_INFO *info,int extend) puts("- check record links"); } - if (!(record= (byte*) my_malloc(info->s->base.pack_reclength,MYF(0)))) + if (!(record= (uchar*) my_malloc(info->s->base.pack_reclength,MYF(0)))) { mi_check_print_error(param,"Not enough memory for record"); DBUG_RETURN(-1); @@ -991,7 +993,7 @@ int chk_data_link(MI_CHECK *param, MI_INFO *info,int extend) goto err2; switch (info->s->data_file_type) { case STATIC_RECORD: - if (my_b_read(¶m->read_cache,(byte*) record, + if (my_b_read(¶m->read_cache,(uchar*) record, info->s->base.pack_reclength)) goto err; start_recpos=pos; @@ -1011,7 +1013,7 @@ int chk_data_link(MI_CHECK *param, MI_INFO *info,int extend) block_info.next_filepos=pos; do { - if (_mi_read_cache(¶m->read_cache,(byte*) block_info.header, + if (_mi_read_cache(¶m->read_cache,(uchar*) block_info.header, (start_block=block_info.next_filepos), sizeof(block_info.header), (flag ? 0 : READING_NEXT) | READING_HEADER)) @@ -1115,7 +1117,7 @@ int chk_data_link(MI_CHECK *param, MI_INFO *info,int extend) got_error=1; break; } - if (_mi_read_cache(¶m->read_cache,(byte*) to,block_info.filepos, + if (_mi_read_cache(¶m->read_cache,(uchar*) to,block_info.filepos, (uint) block_info.data_len, flag == 1 ? READING_NEXT : 0)) goto err; @@ -1176,7 +1178,7 @@ int chk_data_link(MI_CHECK *param, MI_INFO *info,int extend) pos=block_info.filepos+block_info.block_len; break; case COMPRESSED_RECORD: - if (_mi_read_cache(¶m->read_cache,(byte*) block_info.header, pos, + if (_mi_read_cache(¶m->read_cache,(uchar*) block_info.header, pos, info->s->pack.ref_length, READING_NEXT)) goto err; start_recpos=pos; @@ -1193,7 +1195,7 @@ int chk_data_link(MI_CHECK *param, MI_INFO *info,int extend) got_error=1; break; } - if (_mi_read_cache(¶m->read_cache,(byte*) info->rec_buff, + if (_mi_read_cache(¶m->read_cache,(uchar*) info->rec_buff, block_info.filepos, block_info.rec_len, READING_NEXT)) goto err; if (_mi_pack_rec_unpack(info, &info->bit_buff, record, @@ -1253,7 +1255,7 @@ int chk_data_link(MI_CHECK *param, MI_INFO *info,int extend) } } else - key_checksum[key]+=mi_byte_checksum((byte*) info->lastkey, + key_checksum[key]+=mi_byte_checksum((uchar*) info->lastkey, key_length); } } @@ -1363,12 +1365,12 @@ int chk_data_link(MI_CHECK *param, MI_INFO *info,int extend) printf("Lost space: %12s Linkdata: %10s\n", llstr(empty,llbuff),llstr(link_used,llbuff2)); } - my_free((gptr) record,MYF(0)); + my_free((uchar*) record,MYF(0)); DBUG_RETURN (error); err: mi_check_print_error(param,"got error: %d when reading datafile at record: %s",my_errno, llstr(records,llbuff)); err2: - my_free((gptr) record,MYF(0)); + my_free((uchar*) record,MYF(0)); param->testflag|=T_RETRY_WITHOUT_QUICK; DBUG_RETURN(1); } /* chk_data_link */ @@ -1378,7 +1380,7 @@ int chk_data_link(MI_CHECK *param, MI_INFO *info,int extend) /* Save new datafile-name in temp_filename */ int mi_repair(MI_CHECK *param, register MI_INFO *info, - my_string name, int rep_quick) + char * name, int rep_quick) { int error,got_error; uint i; @@ -1427,7 +1429,7 @@ int mi_repair(MI_CHECK *param, register MI_INFO *info, MYF(MY_WME | MY_WAIT_IF_FULL))) goto err; info->opt_flag|=WRITE_CACHE_USED; - if (!(sort_param.record=(byte*) my_malloc((uint) share->base.pack_reclength, + if 
(!(sort_param.record=(uchar*) my_malloc((uint) share->base.pack_reclength, MYF(0))) || !mi_alloc_rec_buff(info, -1, &sort_param.rec_buff)) { @@ -1514,7 +1516,7 @@ int mi_repair(MI_CHECK *param, register MI_INFO *info, { if (my_errno != HA_ERR_FOUND_DUPP_KEY) goto err; - DBUG_DUMP("record",(byte*) sort_param.record,share->base.pack_reclength); + DBUG_DUMP("record",(uchar*) sort_param.record,share->base.pack_reclength); mi_check_print_info(param,"Duplicate key %2d for record at %10s against new record at %10s", info->errkey+1, llstr(sort_param.start_recpos,llbuff), @@ -1660,7 +1662,7 @@ static int writekeys(MI_SORT_PARAM *sort_param) register uint i; uchar *key; MI_INFO *info= sort_param->sort_info->info; - byte *buff= sort_param->record; + uchar *buff= sort_param->record; my_off_t filepos= sort_param->filepos; DBUG_ENTER("writekeys"); @@ -1671,7 +1673,7 @@ static int writekeys(MI_SORT_PARAM *sort_param) { if (info->s->keyinfo[i].flag & HA_FULLTEXT ) { - if (_mi_ft_add(info,i,(char*) key,buff,filepos)) + if (_mi_ft_add(info, i, key, buff, filepos)) goto err; } #ifdef HAVE_SPATIAL @@ -1702,7 +1704,7 @@ static int writekeys(MI_SORT_PARAM *sort_param) { if (info->s->keyinfo[i].flag & HA_FULLTEXT) { - if (_mi_ft_del(info,i,(char*) key,buff,filepos)) + if (_mi_ft_del(info,i, key,buff,filepos)) break; } else @@ -1724,7 +1726,7 @@ static int writekeys(MI_SORT_PARAM *sort_param) /* Change all key-pointers that point to a record */ -int movepoint(register MI_INFO *info, byte *record, my_off_t oldpos, +int movepoint(register MI_INFO *info, uchar *record, my_off_t oldpos, my_off_t newpos, uint prot_key) { register uint i; @@ -1801,7 +1803,7 @@ int flush_blocks(MI_CHECK *param, KEY_CACHE *key_cache, File file) /* Sort index for more efficient reads */ -int mi_sort_index(MI_CHECK *param, register MI_INFO *info, my_string name) +int mi_sort_index(MI_CHECK *param, register MI_INFO *info, char * name) { reg2 uint key; reg1 MI_KEYDEF *keyinfo; @@ -1944,7 +1946,7 @@ static int sort_one_index(MI_CHECK *param, MI_INFO *info, MI_KEYDEF *keyinfo, ("From page: %ld, keyoffset: %lu used_length: %d", (ulong) pagepos, (ulong) (keypos - buff), (int) used_length)); - DBUG_DUMP("buff",(byte*) buff,used_length); + DBUG_DUMP("buff",(uchar*) buff,used_length); goto err; } } @@ -1973,17 +1975,17 @@ static int sort_one_index(MI_CHECK *param, MI_INFO *info, MI_KEYDEF *keyinfo, /* Fill block with zero and write it to the new index file */ length=mi_getint(buff); - bzero((byte*) buff+length,keyinfo->block_length-length); - if (my_pwrite(new_file,(byte*) buff,(uint) keyinfo->block_length, + bzero((uchar*) buff+length,keyinfo->block_length-length); + if (my_pwrite(new_file,(uchar*) buff,(uint) keyinfo->block_length, new_page_pos,MYF(MY_NABP | MY_WAIT_IF_FULL))) { mi_check_print_error(param,"Can't write indexblock, error: %d",my_errno); goto err; } - my_afree((gptr) buff); + my_afree((uchar*) buff); DBUG_RETURN(0); err: - my_afree((gptr) buff); + my_afree((uchar*) buff); DBUG_RETURN(1); } /* sort_one_index */ @@ -2054,13 +2056,13 @@ int filecopy(MI_CHECK *param, File to,File from,my_off_t start, VOID(my_seek(from,start,MY_SEEK_SET,MYF(0))); while (length > buff_length) { - if (my_read(from,(byte*) buff,buff_length,MYF(MY_NABP)) || - my_write(to,(byte*) buff,buff_length,param->myf_rw)) + if (my_read(from,(uchar*) buff,buff_length,MYF(MY_NABP)) || + my_write(to,(uchar*) buff,buff_length,param->myf_rw)) goto err; length-= buff_length; } - if (my_read(from,(byte*) buff,(uint) length,MYF(MY_NABP)) || - my_write(to,(byte*) buff,(uint)
length,param->myf_rw)) + if (my_read(from,(uchar*) buff,(uint) length,MYF(MY_NABP)) || + my_write(to,(uchar*) buff,(uint) length,param->myf_rw)) goto err; if (buff != tmp_buff) my_free(buff,MYF(0)); @@ -2141,7 +2143,7 @@ int mi_repair_by_sort(MI_CHECK *param, register MI_INFO *info, info->opt_flag|=WRITE_CACHE_USED; info->rec_cache.file=info->dfile; /* for sort_delete_record */ - if (!(sort_param.record=(byte*) my_malloc((uint) share->base.pack_reclength, + if (!(sort_param.record=(uchar*) my_malloc((uint) share->base.pack_reclength, MYF(0))) || !mi_alloc_rec_buff(info, -1, &sort_param.rec_buff)) { @@ -2454,8 +2456,8 @@ err: my_free(mi_get_rec_buff_ptr(info, sort_param.rec_buff), MYF(MY_ALLOW_ZERO_PTR)); my_free(sort_param.record,MYF(MY_ALLOW_ZERO_PTR)); - my_free((gptr) sort_info.key_block,MYF(MY_ALLOW_ZERO_PTR)); - my_free((gptr) sort_info.ft_buf, MYF(MY_ALLOW_ZERO_PTR)); + my_free((uchar*) sort_info.key_block,MYF(MY_ALLOW_ZERO_PTR)); + my_free((uchar*) sort_info.ft_buf, MYF(MY_ALLOW_ZERO_PTR)); my_free(sort_info.buff,MYF(MY_ALLOW_ZERO_PTR)); VOID(end_io_cache(¶m->read_cache)); info->opt_flag&= ~(READ_CACHE_USED | WRITE_CACHE_USED); @@ -2741,7 +2743,7 @@ int mi_repair_parallel(MI_CHECK *param, register MI_INFO *info, sort_param[i].filepos=new_header_length; sort_param[i].max_pos=sort_param[i].pos=share->pack.header_length; - sort_param[i].record= (((char *)(sort_param+share->base.keys))+ + sort_param[i].record= (((uchar *)(sort_param+share->base.keys))+ (share->base.pack_reclength * i)); if (!mi_alloc_rec_buff(info, -1, &sort_param[i].rec_buff)) { @@ -2987,9 +2989,9 @@ err: pthread_cond_destroy (&sort_info.cond); pthread_mutex_destroy(&sort_info.mutex); - my_free((gptr) sort_info.ft_buf, MYF(MY_ALLOW_ZERO_PTR)); - my_free((gptr) sort_info.key_block,MYF(MY_ALLOW_ZERO_PTR)); - my_free((gptr) sort_param,MYF(MY_ALLOW_ZERO_PTR)); + my_free((uchar*) sort_info.ft_buf, MYF(MY_ALLOW_ZERO_PTR)); + my_free((uchar*) sort_info.key_block,MYF(MY_ALLOW_ZERO_PTR)); + my_free((uchar*) sort_param,MYF(MY_ALLOW_ZERO_PTR)); my_free(sort_info.buff,MYF(MY_ALLOW_ZERO_PTR)); VOID(end_io_cache(¶m->read_cache)); info->opt_flag&= ~(READ_CACHE_USED | WRITE_CACHE_USED); @@ -3119,7 +3121,7 @@ static int sort_get_next_record(MI_SORT_PARAM *sort_param) int parallel_flag; uint found_record,b_type,left_length; my_off_t pos; - byte *to; + uchar *to; MI_BLOCK_INFO block_info; SORT_INFO *sort_info=sort_param->sort_info; MI_CHECK *param=sort_info->param; @@ -3197,7 +3199,7 @@ static int sort_get_next_record(MI_SORT_PARAM *sort_param) llstr(param->search_after_block,llbuff), llstr(sort_param->start_recpos,llbuff2)); if (_mi_read_cache(&sort_param->read_cache, - (byte*) block_info.header,pos, + (uchar*) block_info.header,pos, MI_BLOCK_INFO_HEADER_LENGTH, (! found_record ? 
READING_NEXT : 0) | parallel_flag | READING_HEADER)) @@ -3461,7 +3463,7 @@ static int sort_get_next_record(MI_SORT_PARAM *sort_param) case COMPRESSED_RECORD: for (searching=0 ;; searching=1, sort_param->pos++) { - if (_mi_read_cache(&sort_param->read_cache,(byte*) block_info.header, + if (_mi_read_cache(&sort_param->read_cache,(uchar*) block_info.header, sort_param->pos, share->pack.ref_length,READING_NEXT)) DBUG_RETURN(-1); @@ -3489,7 +3491,7 @@ static int sort_get_next_record(MI_SORT_PARAM *sort_param) llstr(sort_param->pos,llbuff)); continue; } - if (_mi_read_cache(&sort_param->read_cache,(byte*) sort_param->rec_buff, + if (_mi_read_cache(&sort_param->read_cache,(uchar*) sort_param->rec_buff, block_info.filepos, block_info.rec_len, READING_NEXT)) { @@ -3545,8 +3547,8 @@ int sort_write_record(MI_SORT_PARAM *sort_param) int flag; uint length; ulong block_length,reclength; - byte *from; - byte block_buff[8]; + uchar *from; + uchar block_buff[8]; SORT_INFO *sort_info=sort_param->sort_info; MI_CHECK *param=sort_info->param; MI_INFO *info=sort_info->info; @@ -3585,7 +3587,7 @@ int sort_write_record(MI_SORT_PARAM *sort_param) DBUG_RETURN(1); sort_info->buff_length=reclength; } - from=sort_info->buff+ALIGN_SIZE(MI_MAX_DYN_BLOCK_HEADER); + from= sort_info->buff+ALIGN_SIZE(MI_MAX_DYN_BLOCK_HEADER); } /* We can use info->checksum here as only one thread calls this. */ info->checksum=mi_checksum(info,sort_param->record); @@ -3622,7 +3624,7 @@ int sort_write_record(MI_SORT_PARAM *sort_param) length+= save_pack_length((uint) share->pack.version, block_buff + length, info->blob_length); if (my_b_write(&info->rec_cache,block_buff,length) || - my_b_write(&info->rec_cache,(byte*) sort_param->rec_buff,reclength)) + my_b_write(&info->rec_cache,(uchar*) sort_param->rec_buff,reclength)) { mi_check_print_error(param,"%d when writing to datafile",my_errno); DBUG_RETURN(1); @@ -3922,7 +3924,7 @@ static int sort_insert_key(MI_SORT_PARAM *sort_param, /* Fill block with end-zero and write filled block */ mi_putint(anc_buff,key_block->last_length,nod_flag); - bzero((byte*) anc_buff+key_block->last_length, + bzero((uchar*) anc_buff+key_block->last_length, keyinfo->block_length- key_block->last_length); key_file_length=info->state->key_file_length; if ((filepos=_mi_new(info,keyinfo,DFLT_INIT_HITS)) == HA_OFFSET_ERROR) @@ -3934,10 +3936,10 @@ static int sort_insert_key(MI_SORT_PARAM *sort_param, if (_mi_write_keypage(info, keyinfo, filepos, DFLT_INIT_HITS, anc_buff)) DBUG_RETURN(1); } - else if (my_pwrite(info->s->kfile,(byte*) anc_buff, + else if (my_pwrite(info->s->kfile,(uchar*) anc_buff, (uint) keyinfo->block_length,filepos, param->myf_rw)) DBUG_RETURN(1); - DBUG_DUMP("buff",(byte*) anc_buff,mi_getint(anc_buff)); + DBUG_DUMP("buff",(uchar*) anc_buff,mi_getint(anc_buff)); /* Write separator-key to block in next level */ if (sort_insert_key(sort_param,key_block+1,key_block->lastkey,filepos)) @@ -4028,7 +4030,7 @@ int flush_pending_blocks(MI_SORT_PARAM *sort_param) if (nod_flag) _mi_kpointer(info,key_block->end_pos,filepos); key_file_length=info->state->key_file_length; - bzero((byte*) key_block->buff+length, keyinfo->block_length-length); + bzero((uchar*) key_block->buff+length, keyinfo->block_length-length); if ((filepos=_mi_new(info,keyinfo,DFLT_INIT_HITS)) == HA_OFFSET_ERROR) DBUG_RETURN(1); @@ -4039,10 +4041,10 @@ int flush_pending_blocks(MI_SORT_PARAM *sort_param) DFLT_INIT_HITS, key_block->buff)) DBUG_RETURN(1); } - else if (my_pwrite(info->s->kfile,(byte*) key_block->buff, + else if 
(my_pwrite(info->s->kfile,(uchar*) key_block->buff, (uint) keyinfo->block_length,filepos, myf_rw)) DBUG_RETURN(1); - DBUG_DUMP("buff",(byte*) key_block->buff,length); + DBUG_DUMP("buff",(uchar*) key_block->buff,length); nod_flag=1; } info->s->state.key_root[sort_param->key]=filepos; /* Last is root for tree */ @@ -4080,10 +4082,10 @@ int test_if_almost_full(MI_INFO *info) { if (info->s->options & HA_OPTION_COMPRESS_RECORD) return 0; - return (my_seek(info->s->kfile,0L,MY_SEEK_END,MYF(0))/10*9 > - (my_off_t) (info->s->base.max_key_file_length) || - my_seek(info->dfile,0L,MY_SEEK_END,MYF(0))/10*9 > - (my_off_t) info->s->base.max_data_file_length); + return my_seek(info->s->kfile, 0L, MY_SEEK_END, MYF(MY_THREADSAFE)) / 10 * 9 > + (my_off_t) info->s->base.max_key_file_length || + my_seek(info->dfile, 0L, MY_SEEK_END, MYF(0)) / 10 * 9 > + (my_off_t) info->s->base.max_data_file_length; } /* Recreate table with bigger, more allocated record data */ @@ -4113,34 +4115,34 @@ int recreate_table(MI_CHECK *param, MI_INFO **org_info, char *filename) (param->testflag & T_UNPACK); if (!(keyinfo=(MI_KEYDEF*) my_alloca(sizeof(MI_KEYDEF)*share.base.keys))) DBUG_RETURN(0); - memcpy((byte*) keyinfo,(byte*) share.keyinfo, + memcpy((uchar*) keyinfo,(uchar*) share.keyinfo, (size_t) (sizeof(MI_KEYDEF)*share.base.keys)); key_parts= share.base.all_key_parts; if (!(keysegs=(HA_KEYSEG*) my_alloca(sizeof(HA_KEYSEG)* (key_parts+share.base.keys)))) { - my_afree((gptr) keyinfo); + my_afree((uchar*) keyinfo); DBUG_RETURN(1); } if (!(recdef=(MI_COLUMNDEF*) my_alloca(sizeof(MI_COLUMNDEF)*(share.base.fields+1)))) { - my_afree((gptr) keyinfo); - my_afree((gptr) keysegs); + my_afree((uchar*) keyinfo); + my_afree((uchar*) keysegs); DBUG_RETURN(1); } if (!(uniquedef=(MI_UNIQUEDEF*) my_alloca(sizeof(MI_UNIQUEDEF)*(share.state.header.uniques+1)))) { - my_afree((gptr) recdef); - my_afree((gptr) keyinfo); - my_afree((gptr) keysegs); + my_afree((uchar*) recdef); + my_afree((uchar*) keyinfo); + my_afree((uchar*) keysegs); DBUG_RETURN(1); } /* Copy the column definitions */ - memcpy((byte*) recdef,(byte*) share.rec, + memcpy((uchar*) recdef,(uchar*) share.rec, (size_t) (sizeof(MI_COLUMNDEF)*(share.base.fields+1))); for (rec=recdef,end=recdef+share.base.fields; rec != end ; rec++) { @@ -4152,7 +4154,7 @@ int recreate_table(MI_CHECK *param, MI_INFO **org_info, char *filename) } /* Change the new key to point at the saved key segments */ - memcpy((byte*) keysegs,(byte*) share.keyparts, + memcpy((uchar*) keysegs,(uchar*) share.keyparts, (size_t) (sizeof(HA_KEYSEG)*(key_parts+share.base.keys+ share.state.header.uniques))); keyseg=keysegs; @@ -4169,7 +4171,7 @@ int recreate_table(MI_CHECK *param, MI_INFO **org_info, char *filename) /* Copy the unique definitions and change them to point at the new key segments*/ - memcpy((byte*) uniquedef,(byte*) share.uniqueinfo, + memcpy((uchar*) uniquedef,(uchar*) share.uniqueinfo, (size_t) (sizeof(MI_UNIQUEDEF)*(share.state.header.uniques))); for (u_ptr=uniquedef,u_end=uniquedef+share.state.header.uniques; u_ptr != u_end ; u_ptr++) @@ -4251,10 +4253,10 @@ int recreate_table(MI_CHECK *param, MI_INFO **org_info, char *filename) goto end; error=0; end: - my_afree((gptr) uniquedef); - my_afree((gptr) keyinfo); - my_afree((gptr) recdef); - my_afree((gptr) keysegs); + my_afree((uchar*) uniquedef); + my_afree((uchar*) keyinfo); + my_afree((uchar*) recdef); + my_afree((uchar*) keysegs); DBUG_RETURN(error); } @@ -4267,7 +4269,7 @@ int write_data_suffix(SORT_INFO *sort_info, my_bool fix_datafile) if (info->s->options
& HA_OPTION_COMPRESS_RECORD && fix_datafile) { - char buff[MEMMAP_EXTRA_MARGIN]; + uchar buff[MEMMAP_EXTRA_MARGIN]; bzero(buff,sizeof(buff)); if (my_b_write(&info->rec_cache,buff,sizeof(buff))) { @@ -4357,7 +4359,7 @@ err: void update_auto_increment_key(MI_CHECK *param, MI_INFO *info, my_bool repair_only) { - byte *record; + uchar *record; DBUG_ENTER("update_auto_increment_key"); if (!info->s->base.auto_key || @@ -4376,7 +4378,7 @@ void update_auto_increment_key(MI_CHECK *param, MI_INFO *info, We have to use an allocated buffer instead of info->rec_buff as _mi_put_key_in_record() may use info->rec_buff */ - if (!(record= (byte*) my_malloc((uint) info->s->base.pack_reclength, + if (!(record= (uchar*) my_malloc((uint) info->s->base.pack_reclength, MYF(0)))) { mi_check_print_error(param,"Not enough memory for extra record"); @@ -4504,10 +4506,10 @@ void update_key_parts(MI_KEYDEF *keyinfo, ulong *rec_per_key_part, } -static ha_checksum mi_byte_checksum(const byte *buf, uint length) +static ha_checksum mi_byte_checksum(const uchar *buf, uint length) { ha_checksum crc; - const byte *end=buf+length; + const uchar *end=buf+length; for (crc=0; buf != end; buf++) crc=((crc << 1) + *((uchar*) buf)) + test(crc & (((ha_checksum) 1) << (8*sizeof(ha_checksum)-1))); diff --git a/storage/myisam/mi_checksum.c b/storage/myisam/mi_checksum.c index 711e87c1547..4e87de373bd 100644 --- a/storage/myisam/mi_checksum.c +++ b/storage/myisam/mi_checksum.c @@ -17,7 +17,7 @@ #include "myisamdef.h" -ha_checksum mi_checksum(MI_INFO *info, const byte *buf) +ha_checksum mi_checksum(MI_INFO *info, const uchar *buf) { uint i; ha_checksum crc=0; @@ -25,7 +25,7 @@ ha_checksum mi_checksum(MI_INFO *info, const byte *buf) for (i=info->s->base.fields ; i-- ; buf+=(rec++)->length) { - const byte *pos; + const uchar *pos; ulong length; switch (rec->type) { case FIELD_BLOB: @@ -52,13 +52,13 @@ ha_checksum mi_checksum(MI_INFO *info, const byte *buf) pos=buf; break; } - crc=my_checksum(crc, pos ? pos : "", length); + crc=my_checksum(crc, pos ? 
pos : (uchar*) "", length); } return crc; } -ha_checksum mi_static_checksum(MI_INFO *info, const byte *pos) +ha_checksum mi_static_checksum(MI_INFO *info, const uchar *pos) { return my_checksum(0, pos, info->s->base.reclength); } diff --git a/storage/myisam/mi_close.c b/storage/myisam/mi_close.c index 47b7ba855c0..07105aea88d 100644 --- a/storage/myisam/mi_close.c +++ b/storage/myisam/mi_close.c @@ -87,8 +87,8 @@ int mi_close(register MI_INFO *info) #endif if (share->decode_trees) { - my_free((gptr) share->decode_trees,MYF(0)); - my_free((gptr) share->decode_tables,MYF(0)); + my_free((uchar*) share->decode_trees,MYF(0)); + my_free((uchar*) share->decode_tables,MYF(0)); } #ifdef THREAD thr_lock_delete(&share->lock); @@ -102,19 +102,19 @@ int mi_close(register MI_INFO *info) } } #endif - my_free((gptr) info->s,MYF(0)); + my_free((uchar*) info->s,MYF(0)); } pthread_mutex_unlock(&THR_LOCK_myisam); if (info->ftparser_param) { - my_free((gptr)info->ftparser_param, MYF(0)); + my_free((uchar*)info->ftparser_param, MYF(0)); info->ftparser_param= 0; } if (info->dfile >= 0 && my_close(info->dfile,MYF(0))) error = my_errno; myisam_log_command(MI_LOG_CLOSE,info,NULL,0,error); - my_free((gptr) info,MYF(0)); + my_free((uchar*) info,MYF(0)); if (error) { diff --git a/storage/myisam/mi_create.c b/storage/myisam/mi_create.c index 2b8cbcc7da5..0cac5f08b3b 100644 --- a/storage/myisam/mi_create.c +++ b/storage/myisam/mi_create.c @@ -76,7 +76,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs, LINT_INIT(file); errpos=0; options=0; - bzero((byte*) &share,sizeof(share)); + bzero((uchar*) &share,sizeof(share)); if (flags & HA_DONT_TOUCH_DATA) { @@ -158,6 +158,10 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs, rec--; if (rec->type == (int) FIELD_SKIP_ZERO && rec->length == 1) { + /* + NOTE1: here we change a field type FIELD_SKIP_ZERO -> + FIELD_NORMAL + */ rec->type=(int) FIELD_NORMAL; packed--; min_pack_length++; @@ -494,7 +498,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs, goto err; } - bmove(share.state.header.file_version,(byte*) myisam_file_magic,4); + bmove(share.state.header.file_version,(uchar*) myisam_file_magic,4); ci->old_options=options| (ci->old_options & HA_OPTION_TEMP_COMPRESS_RECORD ? HA_OPTION_COMPRESS_RECORD | HA_OPTION_TEMP_COMPRESS_RECORD: 0); @@ -569,6 +573,10 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs, pthread_mutex_lock(&THR_LOCK_myisam); + /* + NOTE: For test_if_reopen() we need a real path name. Hence we need + MY_RETURN_REAL_PATH for every fn_format(filename, ...). + */ if (ci->index_file_name) { char *iext= strrchr(ci->index_file_name, '.'); @@ -580,13 +588,14 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs, if ((path= strrchr(ci->index_file_name, FN_LIBCHAR))) *path= '\0'; fn_format(filename, name, ci->index_file_name, MI_NAME_IEXT, - MY_REPLACE_DIR | MY_UNPACK_FILENAME | MY_APPEND_EXT); + MY_REPLACE_DIR | MY_UNPACK_FILENAME | + MY_RETURN_REAL_PATH | MY_APPEND_EXT); } else { fn_format(filename, ci->index_file_name, "", MI_NAME_IEXT, - MY_UNPACK_FILENAME | (have_iext ? MY_REPLACE_EXT : - MY_APPEND_EXT)); + MY_UNPACK_FILENAME | MY_RETURN_REAL_PATH | + (have_iext ? 
MY_REPLACE_EXT : MY_APPEND_EXT)); } fn_format(linkname, name, "", MI_NAME_IEXT, MY_UNPACK_FILENAME|MY_APPEND_EXT); @@ -599,13 +608,14 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs, } else { + char *iext= strrchr(name, '.'); + int have_iext= iext && !strcmp(iext, MI_NAME_IEXT); fn_format(filename, name, "", MI_NAME_IEXT, - (MY_UNPACK_FILENAME | - (flags & HA_DONT_TOUCH_DATA) ? MY_RETURN_REAL_PATH : 0) | - MY_APPEND_EXT); + MY_UNPACK_FILENAME | MY_RETURN_REAL_PATH | + (have_iext ? MY_REPLACE_EXT : MY_APPEND_EXT)); linkname_ptr=0; /* Replace the current file */ - create_flag=MY_DELETE_OLD; + create_flag=(flags & HA_CREATE_KEEP_FILES) ? 0 : MY_DELETE_OLD; } /* @@ -614,6 +624,9 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs, A TRUNCATE command checks for the table in the cache only and could be fooled to believe, the table is not open. Pull the emergency brake in this situation. (Bug #8306) + + NOTE: The filename is compared against unique_file_name of every + open table. Hence we need a real path here. */ if (test_if_reopen(filename)) { @@ -676,7 +689,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs, fn_format(filename,name,"", MI_NAME_DEXT, MY_UNPACK_FILENAME | MY_APPEND_EXT); linkname_ptr=0; - create_flag=MY_DELETE_OLD; + create_flag=(flags & HA_CREATE_KEEP_FILES) ? 0 : MY_DELETE_OLD; } if ((dfile= my_create_with_symlink(linkname_ptr, filename, 0, create_mode, diff --git a/storage/myisam/mi_delete.c b/storage/myisam/mi_delete.c index 409930ff7fb..6fe31f30c19 100644 --- a/storage/myisam/mi_delete.c +++ b/storage/myisam/mi_delete.c @@ -32,7 +32,7 @@ static int _mi_ck_real_delete(register MI_INFO *info,MI_KEYDEF *keyinfo, uchar *key, uint key_length, my_off_t *root); -int mi_delete(MI_INFO *info,const byte *record) +int mi_delete(MI_INFO *info,const uchar *record) { uint i; uchar *old_key; @@ -78,7 +78,7 @@ int mi_delete(MI_INFO *info,const byte *record) info->s->keyinfo[i].version++; if (info->s->keyinfo[i].flag & HA_FULLTEXT ) { - if (_mi_ft_del(info,i,(char*) old_key,record,info->lastpos)) + if (_mi_ft_del(info,i, old_key,record,info->lastpos)) goto err; } else @@ -100,7 +100,7 @@ int mi_delete(MI_INFO *info,const byte *record) info->state->records--; mi_sizestore(lastpos,info->lastpos); - myisam_log_command(MI_LOG_DELETE,info,(byte*) lastpos,sizeof(lastpos),0); + myisam_log_command(MI_LOG_DELETE,info,(uchar*) lastpos,sizeof(lastpos),0); VOID(_mi_writeinfo(info,WRITEINFO_UPDATE_KEYFILE)); allow_break(); /* Allow SIGHUP & SIGINT */ if (info->invalidator != 0) @@ -114,7 +114,7 @@ int mi_delete(MI_INFO *info,const byte *record) err: save_errno=my_errno; mi_sizestore(lastpos,info->lastpos); - myisam_log_command(MI_LOG_DELETE,info,(byte*) lastpos, sizeof(lastpos),0); + myisam_log_command(MI_LOG_DELETE,info,(uchar*) lastpos, sizeof(lastpos),0); if (save_errno != HA_ERR_RECORD_CHANGED) { mi_print_error(info->s, HA_ERR_CRASHED); @@ -198,7 +198,7 @@ static int _mi_ck_real_delete(register MI_INFO *info, MI_KEYDEF *keyinfo, } } err: - my_afree((gptr) root_buff); + my_afree((uchar*) root_buff); DBUG_PRINT("exit",("Return: %d",error)); DBUG_RETURN(error); } /* _mi_ck_real_delete */ @@ -223,7 +223,7 @@ static int d_search(register MI_INFO *info, register MI_KEYDEF *keyinfo, my_off_t leaf_page,next_block; uchar lastkey[MI_MAX_KEY_BUFF]; DBUG_ENTER("d_search"); - DBUG_DUMP("page",(byte*) anc_buff,mi_getint(anc_buff)); + DBUG_DUMP("page",(uchar*) anc_buff,mi_getint(anc_buff)); search_key_length= (comp_flag & SEARCH_FIND) ? 
key_length : USE_WHOLE_KEY; flag=(*keyinfo->bin_search)(info,keyinfo,anc_buff,key, search_key_length, @@ -250,7 +250,7 @@ static int d_search(register MI_INFO *info, register MI_KEYDEF *keyinfo, if (info->ft1_to_ft2) { /* we're in ft1->ft2 conversion mode. Saving key data */ - insert_dynamic(info->ft1_to_ft2, (char*) (lastkey+off)); + insert_dynamic(info->ft1_to_ft2, (lastkey+off)); } else { @@ -381,14 +381,14 @@ static int d_search(register MI_INFO *info, register MI_KEYDEF *keyinfo, ret_value|=_mi_write_keypage(info,keyinfo,page,DFLT_INIT_HITS,anc_buff); else { - DBUG_DUMP("page",(byte*) anc_buff,mi_getint(anc_buff)); + DBUG_DUMP("page",(uchar*) anc_buff,mi_getint(anc_buff)); } - my_afree((byte*) leaf_buff); + my_afree((uchar*) leaf_buff); DBUG_PRINT("exit",("Return: %d",ret_value)); DBUG_RETURN(ret_value); err: - my_afree((byte*) leaf_buff); + my_afree((uchar*) leaf_buff); DBUG_PRINT("exit",("Error: %d",my_errno)); DBUG_RETURN (-1); } /* d_search */ @@ -411,7 +411,7 @@ static int del(register MI_INFO *info, register MI_KEYDEF *keyinfo, uchar *key, DBUG_ENTER("del"); DBUG_PRINT("enter",("leaf_page: %ld keypos: 0x%lx", (long) leaf_page, (ulong) keypos)); - DBUG_DUMP("leaf_buff",(byte*) leaf_buff,mi_getint(leaf_buff)); + DBUG_DUMP("leaf_buff",(uchar*) leaf_buff,mi_getint(leaf_buff)); endpos=leaf_buff+mi_getint(leaf_buff); if (!(key_start=_mi_get_last_key(info,keyinfo,leaf_buff,keybuff,endpos, @@ -428,7 +428,7 @@ static int del(register MI_INFO *info, register MI_KEYDEF *keyinfo, uchar *key, ret_value= -1; else { - DBUG_DUMP("next_page",(byte*) next_buff,mi_getint(next_buff)); + DBUG_DUMP("next_page",(uchar*) next_buff,mi_getint(next_buff)); if ((ret_value=del(info,keyinfo,key,anc_buff,next_page,next_buff, keypos,next_block,ret_key)) >0) { @@ -455,7 +455,7 @@ static int del(register MI_INFO *info, register MI_KEYDEF *keyinfo, uchar *key, if (_mi_write_keypage(info,keyinfo,leaf_page,DFLT_INIT_HITS,leaf_buff)) goto err; } - my_afree((byte*) next_buff); + my_afree((uchar*) next_buff); DBUG_RETURN(ret_value); } @@ -479,7 +479,7 @@ static int del(register MI_INFO *info, register MI_KEYDEF *keyinfo, uchar *key, prev_key, prev_key, keybuff,&s_temp); if (length > 0) - bmove_upp((byte*) endpos+length,(byte*) endpos,(uint) (endpos-keypos)); + bmove_upp((uchar*) endpos+length,(uchar*) endpos,(uint) (endpos-keypos)); else bmove(keypos,keypos-length, (int) (endpos-keypos)+length); (*keyinfo->store_key)(keyinfo,keypos,&s_temp); @@ -517,8 +517,8 @@ static int underflow(register MI_INFO *info, register MI_KEYDEF *keyinfo, DBUG_ENTER("underflow"); DBUG_PRINT("enter",("leaf_page: %ld keypos: 0x%lx",(long) leaf_page, (ulong) keypos)); - DBUG_DUMP("anc_buff",(byte*) anc_buff,mi_getint(anc_buff)); - DBUG_DUMP("leaf_buff",(byte*) leaf_buff,mi_getint(leaf_buff)); + DBUG_DUMP("anc_buff",(uchar*) anc_buff,mi_getint(anc_buff)); + DBUG_DUMP("leaf_buff",(uchar*) leaf_buff,mi_getint(leaf_buff)); buff=info->buff; info->buff_used=1; @@ -554,10 +554,10 @@ static int underflow(register MI_INFO *info, register MI_KEYDEF *keyinfo, if (!_mi_fetch_keypage(info,keyinfo,next_page,DFLT_INIT_HITS,buff,0)) goto err; buff_length=mi_getint(buff); - DBUG_DUMP("next",(byte*) buff,buff_length); + DBUG_DUMP("next",(uchar*) buff,buff_length); /* find keys to make a big key-page */ - bmove((byte*) next_keypos-key_reflength,(byte*) buff+2, + bmove((uchar*) next_keypos-key_reflength,(uchar*) buff+2, key_reflength); if (!_mi_get_last_key(info,keyinfo,anc_buff,anc_key,next_keypos,&length) || !_mi_get_last_key(info,keyinfo,leaf_buff,leaf_key, 
@@ -572,8 +572,8 @@ static int underflow(register MI_INFO *info, register MI_KEYDEF *keyinfo, length=buff_length-p_length; endpos=buff+length+leaf_length+t_length; /* buff will always be larger than before !*/ - bmove_upp((byte*) endpos, (byte*) buff+buff_length,length); - memcpy((byte*) buff, (byte*) leaf_buff,(size_t) leaf_length); + bmove_upp((uchar*) endpos, (uchar*) buff+buff_length,length); + memcpy((uchar*) buff, (uchar*) leaf_buff,(size_t) leaf_length); (*keyinfo->store_key)(keyinfo,buff+leaf_length,&s_temp); buff_length=(uint) (endpos-buff); mi_putint(buff,buff_length,nod_flag); @@ -589,7 +589,7 @@ static int underflow(register MI_INFO *info, register MI_KEYDEF *keyinfo, if (buff_length <= keyinfo->block_length) { /* Keys in one page */ - memcpy((byte*) leaf_buff,(byte*) buff,(size_t) buff_length); + memcpy((uchar*) leaf_buff,(uchar*) buff,(size_t) buff_length); if (_mi_dispose(info,keyinfo,next_page,DFLT_INIT_HITS)) goto err; } @@ -605,7 +605,7 @@ static int underflow(register MI_INFO *info, register MI_KEYDEF *keyinfo, &key_length, &after_key))) goto err; length=(uint) (half_pos-buff); - memcpy((byte*) leaf_buff,(byte*) buff,(size_t) length); + memcpy((uchar*) leaf_buff,(uchar*) buff,(size_t) length); mi_putint(leaf_buff,length,nod_flag); /* Correct new keypointer to leaf_page */ @@ -619,7 +619,7 @@ static int underflow(register MI_INFO *info, register MI_KEYDEF *keyinfo, prev_key, prev_key, leaf_key, &s_temp); if (t_length >= 0) - bmove_upp((byte*) endpos+t_length,(byte*) endpos, + bmove_upp((uchar*) endpos+t_length,(uchar*) endpos, (uint) (endpos-keypos)); else bmove(keypos,keypos-t_length,(uint) (endpos-keypos)+t_length); @@ -628,7 +628,7 @@ static int underflow(register MI_INFO *info, register MI_KEYDEF *keyinfo, /* Store key first in new page */ if (nod_flag) - bmove((byte*) buff+2,(byte*) half_pos-nod_flag,(size_t) nod_flag); + bmove((uchar*) buff+2,(uchar*) half_pos-nod_flag,(size_t) nod_flag); if (!(*keyinfo->get_key)(keyinfo,nod_flag,&half_pos,leaf_key)) goto err; t_length=(int) (*keyinfo->pack_key)(keyinfo, nod_flag, (uchar*) 0, @@ -636,7 +636,7 @@ static int underflow(register MI_INFO *info, register MI_KEYDEF *keyinfo, leaf_key, &s_temp); /* t_length will always be > 0 for a new page !*/ length=(uint) ((buff+mi_getint(buff))-half_pos); - bmove((byte*) buff+p_length+t_length,(byte*) half_pos,(size_t) length); + bmove((uchar*) buff+p_length+t_length,(uchar*) half_pos,(size_t) length); (*keyinfo->store_key)(keyinfo,buff+p_length,&s_temp); mi_putint(buff,length+t_length+p_length,nod_flag); @@ -659,10 +659,10 @@ static int underflow(register MI_INFO *info, register MI_KEYDEF *keyinfo, goto err; buff_length=mi_getint(buff); endpos=buff+buff_length; - DBUG_DUMP("prev",(byte*) buff,buff_length); + DBUG_DUMP("prev",(uchar*) buff,buff_length); /* find keys to make a big key-page */ - bmove((byte*) next_keypos - key_reflength,(byte*) leaf_buff+2, + bmove((uchar*) next_keypos - key_reflength,(uchar*) leaf_buff+2, key_reflength); next_keypos=keypos; if (!(*keyinfo->get_key)(keyinfo,key_reflength,&next_keypos, @@ -679,10 +679,10 @@ static int underflow(register MI_INFO *info, register MI_KEYDEF *keyinfo, prev_key, prev_key, anc_key, &s_temp); if (t_length >= 0) - bmove((byte*) endpos+t_length,(byte*) leaf_buff+p_length, + bmove((uchar*) endpos+t_length,(uchar*) leaf_buff+p_length, (size_t) (leaf_length-p_length)); else /* We gained space */ - bmove((byte*) endpos,(byte*) leaf_buff+((int) p_length-t_length), + bmove((uchar*) endpos,(uchar*) leaf_buff+((int) p_length-t_length), 
(size_t) (leaf_length-p_length+t_length)); (*keyinfo->store_key)(keyinfo,endpos,&s_temp); @@ -715,8 +715,8 @@ static int underflow(register MI_INFO *info, register MI_KEYDEF *keyinfo, goto err; _mi_kpointer(info,leaf_key+key_length,leaf_page); /* Save key in anc_buff */ - DBUG_DUMP("anc_buff",(byte*) anc_buff,anc_length); - DBUG_DUMP("key_to_anc",(byte*) leaf_key,key_length); + DBUG_DUMP("anc_buff",(uchar*) anc_buff,anc_length); + DBUG_DUMP("key_to_anc",(uchar*) leaf_key,key_length); temp_pos=anc_buff+anc_length; t_length=(*keyinfo->pack_key)(keyinfo,key_reflength, @@ -725,7 +725,7 @@ static int underflow(register MI_INFO *info, register MI_KEYDEF *keyinfo, anc_pos, anc_pos, leaf_key,&s_temp); if (t_length > 0) - bmove_upp((byte*) temp_pos+t_length,(byte*) temp_pos, + bmove_upp((uchar*) temp_pos+t_length,(uchar*) temp_pos, (uint) (temp_pos-keypos)); else bmove(keypos,keypos-t_length,(uint) (temp_pos-keypos)+t_length); @@ -734,15 +734,15 @@ static int underflow(register MI_INFO *info, register MI_KEYDEF *keyinfo, /* Store first key on new page */ if (nod_flag) - bmove((byte*) leaf_buff+2,(byte*) half_pos-nod_flag,(size_t) nod_flag); + bmove((uchar*) leaf_buff+2,(uchar*) half_pos-nod_flag,(size_t) nod_flag); if (!(length=(*keyinfo->get_key)(keyinfo,nod_flag,&half_pos,leaf_key))) goto err; - DBUG_DUMP("key_to_leaf",(byte*) leaf_key,length); + DBUG_DUMP("key_to_leaf",(uchar*) leaf_key,length); t_length=(*keyinfo->pack_key)(keyinfo,nod_flag, (uchar*) 0, (uchar*) 0, (uchar*) 0, leaf_key, &s_temp); length=(uint) ((buff+buff_length)-half_pos); DBUG_PRINT("info",("t_length: %d length: %d",t_length,(int) length)); - bmove((byte*) leaf_buff+p_length+t_length,(byte*) half_pos, + bmove((uchar*) leaf_buff+p_length+t_length,(uchar*) half_pos, (size_t) length); (*keyinfo->store_key)(keyinfo,leaf_buff+p_length,&s_temp); mi_putint(leaf_buff,length+t_length+p_length,nod_flag); @@ -806,7 +806,7 @@ static uint remove_key(MI_KEYDEF *keyinfo, uint nod_flag, if (next_length > prev_length) { /* We have to copy data from the current key to the next key */ - bmove_upp((char*) keypos,(char*) (lastkey+next_length), + bmove_upp(keypos, (lastkey+next_length), (next_length-prev_length)); keypos-=(next_length-prev_length)+prev_pack_length; store_key_length(keypos,prev_length); @@ -853,7 +853,7 @@ static uint remove_key(MI_KEYDEF *keyinfo, uint nod_flag, if (next_length >= prev_length) { /* Key after is based on deleted key */ uint pack_length,tmp; - bmove_upp((char*) keypos,(char*) (lastkey+next_length), + bmove_upp(keypos, (lastkey+next_length), tmp=(next_length-prev_length)); rest_length+=tmp; pack_length= prev_length ? 
get_pack_length(rest_length): 0; @@ -886,7 +886,7 @@ static uint remove_key(MI_KEYDEF *keyinfo, uint nod_flag, } } end: - bmove((byte*) start,(byte*) start+s_length, + bmove((uchar*) start,(uchar*) start+s_length, (uint) (page_end-start-s_length)); DBUG_RETURN((uint) s_length); } /* remove_key */ diff --git a/storage/myisam/mi_delete_all.c b/storage/myisam/mi_delete_all.c index a17514486d5..dea0385cbca 100644 --- a/storage/myisam/mi_delete_all.c +++ b/storage/myisam/mi_delete_all.c @@ -47,7 +47,7 @@ int mi_delete_all_rows(MI_INFO *info) for (i=0 ; i < share->base.keys ; i++) state->key_root[i]= HA_OFFSET_ERROR; - myisam_log_command(MI_LOG_DELETE_ALL,info,(byte*) 0,0,0); + myisam_log_command(MI_LOG_DELETE_ALL,info,(uchar*) 0,0,0); /* If we are using delayed keys or if the user has done changes to the tables since it was locked then there may be key blocks in the key cache diff --git a/storage/myisam/mi_dynrec.c b/storage/myisam/mi_dynrec.c index 642efbd4389..cdd70abe9ad 100644 --- a/storage/myisam/mi_dynrec.c +++ b/storage/myisam/mi_dynrec.c @@ -28,15 +28,15 @@ /* Enough for comparing if number is zero */ static char zero_string[]={0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; -static int write_dynamic_record(MI_INFO *info,const byte *record, +static int write_dynamic_record(MI_INFO *info,const uchar *record, ulong reclength); static int _mi_find_writepos(MI_INFO *info,ulong reclength,my_off_t *filepos, ulong *length); -static int update_dynamic_record(MI_INFO *info,my_off_t filepos,byte *record, +static int update_dynamic_record(MI_INFO *info,my_off_t filepos,uchar *record, ulong reclength); static int delete_dynamic_record(MI_INFO *info,my_off_t filepos, uint second_read); -static int _mi_cmp_buffer(File file, const byte *buff, my_off_t filepos, +static int _mi_cmp_buffer(File file, const uchar *buff, my_off_t filepos, uint length); #ifdef THREAD @@ -71,19 +71,27 @@ my_bool mi_dynmap_file(MI_INFO *info, my_off_t size) DBUG_PRINT("warning", ("File is too large for mmap")); DBUG_RETURN(1); } - info->s->file_map= (byte*) + /* + I wonder if it is good to use MAP_NORESERVE. From the Linux man page: + MAP_NORESERVE + Do not reserve swap space for this mapping. When swap space is + reserved, one has the guarantee that it is possible to modify the + mapping. When swap space is not reserved one might get SIGSEGV + upon a write if no physical memory is available. + */ + info->s->file_map= (uchar*) my_mmap(0, (size_t)(size + MEMMAP_EXTRA_MARGIN), info->s->mode==O_RDONLY ? 
PROT_READ : PROT_READ | PROT_WRITE, MAP_SHARED | MAP_NORESERVE, info->dfile, 0L); - if (info->s->file_map == (byte*) MAP_FAILED) + if (info->s->file_map == (uchar*) MAP_FAILED) { info->s->file_map= NULL; DBUG_RETURN(1); } #if defined(HAVE_MADVISE) - madvise(info->s->file_map, size, MADV_RANDOM); + madvise((char*) info->s->file_map, size, MADV_RANDOM); #endif info->s->mmaped_length= size; DBUG_RETURN(0); @@ -104,7 +112,7 @@ void mi_remap_file(MI_INFO *info, my_off_t size) { if (info->s->file_map) { - VOID(my_munmap(info->s->file_map, + VOID(my_munmap((char*) info->s->file_map, (size_t) info->s->mmaped_length + MEMMAP_EXTRA_MARGIN)); mi_dynmap_file(info, size); } @@ -127,8 +135,8 @@ void mi_remap_file(MI_INFO *info, my_off_t size) 0 ok */ -uint mi_mmap_pread(MI_INFO *info, byte *Buffer, - uint Count, my_off_t offset, myf MyFlags) +size_t mi_mmap_pread(MI_INFO *info, uchar *Buffer, + size_t Count, my_off_t offset, myf MyFlags) { DBUG_PRINT("info", ("mi_read with mmap %d\n", info->dfile)); if (info->s->concurrent_insert) @@ -159,8 +167,8 @@ uint mi_mmap_pread(MI_INFO *info, byte *Buffer, /* wrapper for my_pread in case mmap isn't used */ -uint mi_nommap_pread(MI_INFO *info, byte *Buffer, - uint Count, my_off_t offset, myf MyFlags) +size_t mi_nommap_pread(MI_INFO *info, uchar *Buffer, + size_t Count, my_off_t offset, myf MyFlags) { return my_pread(info->dfile, Buffer, Count, offset, MyFlags); } @@ -182,8 +190,8 @@ uint mi_nommap_pread(MI_INFO *info, byte *Buffer, !=0 error. In this case return error from pwrite */ -uint mi_mmap_pwrite(MI_INFO *info, byte *Buffer, - uint Count, my_off_t offset, myf MyFlags) +size_t mi_mmap_pwrite(MI_INFO *info, const uchar *Buffer, + size_t Count, my_off_t offset, myf MyFlags) { DBUG_PRINT("info", ("mi_write with mmap %d\n", info->dfile)); if (info->s->concurrent_insert) @@ -216,28 +224,28 @@ uint mi_mmap_pwrite(MI_INFO *info, byte *Buffer, /* wrapper for my_pwrite in case mmap isn't used */ -uint mi_nommap_pwrite(MI_INFO *info, byte *Buffer, - uint Count, my_off_t offset, myf MyFlags) +size_t mi_nommap_pwrite(MI_INFO *info, const uchar *Buffer, + size_t Count, my_off_t offset, myf MyFlags) { return my_pwrite(info->dfile, Buffer, Count, offset, MyFlags); } -int _mi_write_dynamic_record(MI_INFO *info, const byte *record) +int _mi_write_dynamic_record(MI_INFO *info, const uchar *record) { ulong reclength=_mi_rec_pack(info,info->rec_buff,record); return (write_dynamic_record(info,info->rec_buff,reclength)); } -int _mi_update_dynamic_record(MI_INFO *info, my_off_t pos, const byte *record) +int _mi_update_dynamic_record(MI_INFO *info, my_off_t pos, const uchar *record) { uint length=_mi_rec_pack(info,info->rec_buff,record); return (update_dynamic_record(info,pos,info->rec_buff,length)); } -int _mi_write_blob_record(MI_INFO *info, const byte *record) +int _mi_write_blob_record(MI_INFO *info, const uchar *record) { - byte *rec_buff; + uchar *rec_buff; int error; ulong reclength,reclength2,extra; @@ -252,7 +260,7 @@ int _mi_write_blob_record(MI_INFO *info, const byte *record) return -1; } #endif - if (!(rec_buff=(byte*) my_alloca(reclength))) + if (!(rec_buff=(uchar*) my_alloca(reclength))) { my_errno= HA_ERR_OUT_OF_MEM; /* purecov: inspected */ return(-1); @@ -269,9 +277,9 @@ int _mi_write_blob_record(MI_INFO *info, const byte *record) } -int _mi_update_blob_record(MI_INFO *info, my_off_t pos, const byte *record) +int _mi_update_blob_record(MI_INFO *info, my_off_t pos, const uchar *record) { - byte *rec_buff; + uchar *rec_buff; int error; ulong reclength,extra;
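Editor's note: the mi_dynmap_file() hunk above maps the data file with PROT_READ for read-only handles, PROT_READ|PROT_WRITE otherwise, and MAP_SHARED|MAP_NORESERVE (so, as the new comment warns, a write may SIGSEGV when no swap is reserved and physical memory runs out). A self-contained C sketch of that pattern, with hypothetical names rather than MySQL API:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/stat.h>

/* map a data file roughly the way mi_dynmap_file() presumably does */
static unsigned char *map_data_file(int fd, size_t size, int open_mode)
{
  int prot= (open_mode == O_RDONLY) ? PROT_READ : PROT_READ | PROT_WRITE;
  void *map= mmap(NULL, size, prot, MAP_SHARED | MAP_NORESERVE, fd, 0);
  return (map == MAP_FAILED) ? NULL : (unsigned char*) map;
}

int main(int argc, char **argv)
{
  struct stat st;
  unsigned char *map;
  int fd;

  if (argc < 2)
    return 1;
  if ((fd= open(argv[1], O_RDONLY)) < 0)
    return 1;
  if (fstat(fd, &st) == 0 && st.st_size > 0 &&
      (map= map_data_file(fd, (size_t) st.st_size, O_RDONLY)))
  {
    printf("first byte: 0x%02x\n", map[0]);   /* reads go through the mapping */
    munmap(map, (size_t) st.st_size);
  }
  close(fd);
  return 0;
}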
@@ -286,7 +294,7 @@ int _mi_update_blob_record(MI_INFO *info, my_off_t pos, const byte *record) return -1; } #endif - if (!(rec_buff=(byte*) my_alloca(reclength))) + if (!(rec_buff=(uchar*) my_alloca(reclength))) { my_errno= HA_ERR_OUT_OF_MEM; /* purecov: inspected */ return(-1); @@ -309,7 +317,7 @@ int _mi_delete_dynamic_record(MI_INFO *info) /* Write record to data-file */ -static int write_dynamic_record(MI_INFO *info, const byte *record, +static int write_dynamic_record(MI_INFO *info, const uchar *record, ulong reclength) { int flag; @@ -325,7 +333,7 @@ static int write_dynamic_record(MI_INFO *info, const byte *record, if (_mi_write_part_record(info,filepos,length, (info->append_insert_at_end ? HA_OFFSET_ERROR : info->s->state.dellink), - (byte**) &record,&reclength,&flag)) + (uchar**) &record,&reclength,&flag)) goto err; } while (reclength); @@ -416,7 +424,7 @@ static bool unlink_deleted_block(MI_INFO *info, MI_BLOCK_INFO *block_info) & BLOCK_DELETED)) DBUG_RETURN(1); /* Something is wrong */ mi_sizestore(tmp.header+4,block_info->next_filepos); - if (info->s->file_write(info,(char*) tmp.header+4,8, + if (info->s->file_write(info, tmp.header+4,8, block_info->prev_filepos+4, MYF(MY_NABP))) DBUG_RETURN(1); /* Unlink block from next block */ @@ -426,7 +434,7 @@ static bool unlink_deleted_block(MI_INFO *info, MI_BLOCK_INFO *block_info) & BLOCK_DELETED)) DBUG_RETURN(1); /* Something is wrong */ mi_sizestore(tmp.header+12,block_info->prev_filepos); - if (info->s->file_write(info,(char*) tmp.header+12,8, + if (info->s->file_write(info, tmp.header+12,8, block_info->next_filepos+12, MYF(MY_NABP))) DBUG_RETURN(1); @@ -475,7 +483,7 @@ static int update_backward_delete_link(MI_INFO *info, my_off_t delete_block, if (_mi_get_block_info(&block_info,info->dfile,delete_block) & BLOCK_DELETED) { - char buff[8]; + uchar buff[8]; mi_sizestore(buff,filepos); if (info->s->file_write(info,buff, 8, delete_block+12, MYF(MY_NABP))) DBUG_RETURN(1); /* Error on write */ @@ -535,7 +543,7 @@ static int delete_dynamic_record(MI_INFO *info, my_off_t filepos, bfill(block_info.header+12,8,255); else mi_sizestore(block_info.header+12,block_info.next_filepos); - if (info->s->file_write(info,(byte*) block_info.header,20,filepos, + if (info->s->file_write(info,(uchar*) block_info.header,20,filepos, MYF(MY_NABP))) DBUG_RETURN(1); info->s->state.dellink = filepos; @@ -558,12 +566,12 @@ int _mi_write_part_record(MI_INFO *info, my_off_t filepos, /* points at empty block */ ulong length, /* length of block */ my_off_t next_filepos,/* Next empty block */ - byte **record, /* pointer to record ptr */ + uchar **record, /* pointer to record ptr */ ulong *reclength, /* length of *record */ int *flag) /* *flag == 0 if header */ { ulong head_length,res_length,extra_length,long_block,del_length; - byte *pos,*record_end; + uchar *pos,*record_end; my_off_t next_delete_block; uchar temp[MI_SPLIT_LENGTH+MI_DYN_DELETE_BLOCK_HEADER]; DBUG_ENTER("_mi_write_part_record"); @@ -607,7 +615,7 @@ int _mi_write_part_record(MI_INFO *info, temp[0]=13; mi_int4store(temp+1,*reclength); mi_int3store(temp+5,length-head_length); - mi_sizestore((byte*) temp+8,next_filepos); + mi_sizestore((uchar*) temp+8,next_filepos); } else { @@ -617,13 +625,13 @@ int _mi_write_part_record(MI_INFO *info, { mi_int3store(temp+1,*reclength); mi_int3store(temp+4,length-head_length); - mi_sizestore((byte*) temp+7,next_filepos); + mi_sizestore((uchar*) temp+7,next_filepos); } else { mi_int2store(temp+1,*reclength); mi_int2store(temp+3,length-head_length); - mi_sizestore((byte*) 
temp+5,next_filepos); + mi_sizestore((uchar*) temp+5,next_filepos); } } } @@ -634,12 +642,12 @@ int _mi_write_part_record(MI_INFO *info, if (long_block) { mi_int3store(temp+1,length-head_length); - mi_sizestore((byte*) temp+4,next_filepos); + mi_sizestore((uchar*) temp+4,next_filepos); } else { mi_int2store(temp+1,length-head_length); - mi_sizestore((byte*) temp+3,next_filepos); + mi_sizestore((uchar*) temp+3,next_filepos); } } } @@ -660,14 +668,14 @@ int _mi_write_part_record(MI_INFO *info, } length= *reclength+head_length; /* Write only what is needed */ } - DBUG_DUMP("header",(byte*) temp,head_length); + DBUG_DUMP("header",(uchar*) temp,head_length); /* Make a long block for one write */ record_end= *record+length-head_length; del_length=(res_length ? MI_DYN_DELETE_BLOCK_HEADER : 0); - bmove((byte*) (*record-head_length),(byte*) temp,head_length); + bmove((uchar*) (*record-head_length),(uchar*) temp,head_length); memcpy(temp,record_end,(size_t) (extra_length+del_length)); - bzero((byte*) record_end,extra_length); + bzero((uchar*) record_end,extra_length); if (res_length) { @@ -707,18 +715,18 @@ int _mi_write_part_record(MI_INFO *info, if (info->update & HA_STATE_EXTEND_BLOCK) { info->update&= ~HA_STATE_EXTEND_BLOCK; - if (my_block_write(&info->rec_cache,(byte*) *record-head_length, + if (my_block_write(&info->rec_cache,(uchar*) *record-head_length, length+extra_length+del_length,filepos)) goto err; } - else if (my_b_write(&info->rec_cache,(byte*) *record-head_length, + else if (my_b_write(&info->rec_cache,(uchar*) *record-head_length, length+extra_length+del_length)) goto err; } else { info->rec_cache.seek_not_done=1; - if (info->s->file_write(info,(byte*) *record-head_length,length+extra_length+ + if (info->s->file_write(info,(uchar*) *record-head_length,length+extra_length+ del_length,filepos,info->s->write_flag)) goto err; } @@ -744,7 +752,7 @@ err: /* update record from datafile */ -static int update_dynamic_record(MI_INFO *info, my_off_t filepos, byte *record, +static int update_dynamic_record(MI_INFO *info, my_off_t filepos, uchar *record, ulong reclength) { int flag; @@ -828,7 +836,7 @@ static int update_dynamic_record(MI_INFO *info, my_off_t filepos, byte *record, mi_int3store(del_block.header+1, rest_length); mi_sizestore(del_block.header+4,info->s->state.dellink); bfill(del_block.header+12,8,255); - if (info->s->file_write(info,(byte*) del_block.header,20, next_pos, + if (info->s->file_write(info,(uchar*) del_block.header,20, next_pos, MYF(MY_NABP))) DBUG_RETURN(1); info->s->state.dellink= next_pos; @@ -867,10 +875,11 @@ err: /* Pack a record. 
Return new reclength */ -uint _mi_rec_pack(MI_INFO *info, register byte *to, register const byte *from) +uint _mi_rec_pack(MI_INFO *info, register uchar *to, + register const uchar *from) { uint length,new_length,flag,bit,i; - char *pos,*end,*startpos,*packpos; + uchar *pos,*end,*startpos,*packpos; enum en_fieldtype type; reg3 MI_COLUMNDEF *rec; MI_BLOB *blob; @@ -893,7 +902,7 @@ uint _mi_rec_pack(MI_INFO *info, register byte *to, register const byte *from) { char *temp_pos; size_t tmp_length=length-mi_portable_sizeof_char_ptr; - memcpy((byte*) to,from,tmp_length); + memcpy((uchar*) to,from,tmp_length); memcpy_fixed(&temp_pos,from+tmp_length,sizeof(char*)); memcpy(to+tmp_length,temp_pos,(size_t) blob->length); to+=tmp_length+blob->length; @@ -902,17 +911,17 @@ uint _mi_rec_pack(MI_INFO *info, register byte *to, register const byte *from) } else if (type == FIELD_SKIP_ZERO) { - if (memcmp((byte*) from,zero_string,length) == 0) + if (memcmp((uchar*) from,zero_string,length) == 0) flag|=bit; else { - memcpy((byte*) to,from,(size_t) length); to+=length; + memcpy((uchar*) to,from,(size_t) length); to+=length; } } else if (type == FIELD_SKIP_ENDSPACE || type == FIELD_SKIP_PRESPACE) { - pos= (byte*) from; end= (byte*) from + length; + pos= (uchar*) from; end= (uchar*) from + length; if (type == FIELD_SKIP_ENDSPACE) { /* Pack trailing spaces */ while (end > from && *(end-1) == ' ') @@ -935,7 +944,7 @@ uint _mi_rec_pack(MI_INFO *info, register byte *to, register const byte *from) } else *to++= (char) new_length; - memcpy((byte*) to,pos,(size_t) new_length); to+=new_length; + memcpy((uchar*) to,pos,(size_t) new_length); to+=new_length; flag|=bit; } else @@ -992,11 +1001,11 @@ uint _mi_rec_pack(MI_INFO *info, register byte *to, register const byte *from) Returns 0 if record is ok. */ -my_bool _mi_rec_check(MI_INFO *info,const char *record, byte *rec_buff, +my_bool _mi_rec_check(MI_INFO *info,const uchar *record, uchar *rec_buff, ulong packed_length, my_bool with_checksum) { uint length,new_length,flag,bit,i; - char *pos,*end,*packpos,*to; + uchar *pos,*end,*packpos,*to; enum en_fieldtype type; reg3 MI_COLUMNDEF *rec; DBUG_ENTER("_mi_rec_check"); @@ -1021,7 +1030,7 @@ my_bool _mi_rec_check(MI_INFO *info,const char *record, byte *rec_buff, } else if (type == FIELD_SKIP_ZERO) { - if (memcmp((byte*) record,zero_string,length) == 0) + if (memcmp((uchar*) record,zero_string,length) == 0) { if (!(flag & bit)) goto err; @@ -1032,7 +1041,7 @@ my_bool _mi_rec_check(MI_INFO *info,const char *record, byte *rec_buff, else if (type == FIELD_SKIP_ENDSPACE || type == FIELD_SKIP_PRESPACE) { - pos= (byte*) record; end= (byte*) record + length; + pos= (uchar*) record; end= (uchar*) record + length; if (type == FIELD_SKIP_ENDSPACE) { /* Pack trailing spaces */ while (end > record && *(end-1) == ' ') @@ -1114,12 +1123,12 @@ err: /* Returns -1 and my_errno =HA_ERR_RECORD_DELETED if reclength isn't */ /* right. 
Returns reclength (>0) if ok */ -ulong _mi_rec_unpack(register MI_INFO *info, register byte *to, byte *from, +ulong _mi_rec_unpack(register MI_INFO *info, register uchar *to, uchar *from, ulong found_length) { uint flag,bit,length,rec_length,min_pack_length; enum en_fieldtype type; - byte *from_end,*to_end,*packpos; + uchar *from_end,*to_end,*packpos; reg3 MI_COLUMNDEF *rec,*end_field; DBUG_ENTER("_mi_rec_unpack"); @@ -1165,7 +1174,7 @@ ulong _mi_rec_unpack(register MI_INFO *info, register byte *to, byte *from, if (flag & bit) { if (type == FIELD_BLOB || type == FIELD_SKIP_ZERO) - bzero((byte*) to,rec_length); + bzero((uchar*) to,rec_length); else if (type == FIELD_SKIP_ENDSPACE || type == FIELD_SKIP_PRESPACE) { @@ -1187,13 +1196,13 @@ ulong _mi_rec_unpack(register MI_INFO *info, register byte *to, byte *from, goto err; if (type == FIELD_SKIP_ENDSPACE) { - memcpy(to,(byte*) from,(size_t) length); - bfill((byte*) to+length,rec_length-length,' '); + memcpy(to,(uchar*) from,(size_t) length); + bfill((uchar*) to+length,rec_length-length,' '); } else { - bfill((byte*) to,rec_length-length,' '); - memcpy(to+rec_length-length,(byte*) from,(size_t) length); + bfill((uchar*) to,rec_length-length,' '); + memcpy(to+rec_length-length,(uchar*) from,(size_t) length); } from+=length; } @@ -1207,9 +1216,9 @@ ulong _mi_rec_unpack(register MI_INFO *info, register byte *to, byte *from, from_left - size_length < blob_length || from_left - size_length - blob_length < min_pack_length) goto err; - memcpy((byte*) to,(byte*) from,(size_t) size_length); + memcpy((uchar*) to,(uchar*) from,(size_t) size_length); from+=size_length; - memcpy_fixed((byte*) to+size_length,(byte*) &from,sizeof(char*)); + memcpy_fixed((uchar*) to+size_length,(uchar*) &from,sizeof(char*)); from+=blob_length; } else @@ -1218,7 +1227,7 @@ ulong _mi_rec_unpack(register MI_INFO *info, register byte *to, byte *from, min_pack_length--; if (min_pack_length + rec_length > (uint) (from_end - from)) goto err; - memcpy(to,(byte*) from,(size_t) rec_length); from+=rec_length; + memcpy(to,(uchar*) from,(size_t) rec_length); from+=rec_length; } if ((bit= bit << 1) >= 256) { @@ -1230,7 +1239,7 @@ ulong _mi_rec_unpack(register MI_INFO *info, register byte *to, byte *from, if (min_pack_length > (uint) (from_end - from)) goto err; min_pack_length-=rec_length; - memcpy(to, (byte*) from, (size_t) rec_length); + memcpy(to, (uchar*) from, (size_t) rec_length); from+=rec_length; } } @@ -1243,14 +1252,14 @@ err: my_errno= HA_ERR_WRONG_IN_RECORD; DBUG_PRINT("error",("to_end: 0x%lx -> 0x%lx from_end: 0x%lx -> 0x%lx", (long) to, (long) to_end, (long) from, (long) from_end)); - DBUG_DUMP("from",(byte*) info->rec_buff,info->s->base.min_pack_length); + DBUG_DUMP("from",(uchar*) info->rec_buff,info->s->base.min_pack_length); DBUG_RETURN(MY_FILE_ERROR); } /* _mi_rec_unpack */ /* Calc length of blob. 
Update info in blobs->length */ -ulong _my_calc_total_blob_length(MI_INFO *info, const byte *record) +ulong _my_calc_total_blob_length(MI_INFO *info, const uchar *record) { ulong length; MI_BLOB *blob,*end; @@ -1266,7 +1275,7 @@ ulong _my_calc_total_blob_length(MI_INFO *info, const byte *record) } -ulong _mi_calc_blob_length(uint length, const byte *pos) +ulong _mi_calc_blob_length(uint length, const uchar *pos) { switch (length) { case 1: @@ -1284,7 +1293,7 @@ ulong _mi_calc_blob_length(uint length, const byte *pos) } -void _my_store_blob_length(byte *pos,uint pack_length,uint length) +void _my_store_blob_length(uchar *pos,uint pack_length,uint length) { switch (pack_length) { case 1: @@ -1337,11 +1346,11 @@ void _my_store_blob_length(byte *pos,uint pack_length,uint length) -1 Error */ -int _mi_read_dynamic_record(MI_INFO *info, my_off_t filepos, byte *buf) +int _mi_read_dynamic_record(MI_INFO *info, my_off_t filepos, uchar *buf) { int block_of_record; uint b_type,left_length; - byte *to; + uchar *to; MI_BLOCK_INFO block_info; File file; DBUG_ENTER("mi_read_dynamic_record"); @@ -1397,7 +1406,7 @@ int _mi_read_dynamic_record(MI_INFO *info, my_off_t filepos, byte *buf) prefetch_len= block_info.data_len; if (prefetch_len) { - memcpy((byte*) to, block_info.header + offset, prefetch_len); + memcpy((uchar*) to, block_info.header + offset, prefetch_len); block_info.data_len-= prefetch_len; left_length-= prefetch_len; to+= prefetch_len; @@ -1415,7 +1424,7 @@ int _mi_read_dynamic_record(MI_INFO *info, my_off_t filepos, byte *buf) there is no equivalent without seeking. We are at the right position already. :( */ - if (info->s->file_read(info, (byte*) to, block_info.data_len, + if (info->s->file_read(info, (uchar*) to, block_info.data_len, filepos, MYF(MY_NABP))) goto panic; left_length-=block_info.data_len; @@ -1442,9 +1451,9 @@ err: /* compare unique constraint between stored rows */ int _mi_cmp_dynamic_unique(MI_INFO *info, MI_UNIQUEDEF *def, - const byte *record, my_off_t pos) + const uchar *record, my_off_t pos) { - byte *rec_buff,*old_record; + uchar *rec_buff,*old_record; int error; DBUG_ENTER("_mi_cmp_dynamic_unique"); @@ -1470,11 +1479,11 @@ int _mi_cmp_dynamic_unique(MI_INFO *info, MI_UNIQUEDEF *def, /* Compare of record one disk with packed record in memory */ -int _mi_cmp_dynamic_record(register MI_INFO *info, register const byte *record) +int _mi_cmp_dynamic_record(register MI_INFO *info, register const uchar *record) { uint flag,reclength,b_type; my_off_t filepos; - byte *buffer; + uchar *buffer; MI_BLOCK_INFO block_info; DBUG_ENTER("_mi_cmp_dynamic_record"); @@ -1496,7 +1505,7 @@ int _mi_cmp_dynamic_record(register MI_INFO *info, register const byte *record) { /* If check isn't disabled */ if (info->s->base.blobs) { - if (!(buffer=(byte*) my_alloca(info->s->base.pack_reclength+ + if (!(buffer=(uchar*) my_alloca(info->s->base.pack_reclength+ _my_calc_total_blob_length(info,record)))) DBUG_RETURN(-1); } @@ -1544,18 +1553,18 @@ int _mi_cmp_dynamic_record(register MI_INFO *info, register const byte *record) my_errno=0; err: if (buffer != info->rec_buff) - my_afree((gptr) buffer); + my_afree((uchar*) buffer); DBUG_RETURN(my_errno); } /* Compare file to buffert */ -static int _mi_cmp_buffer(File file, const byte *buff, my_off_t filepos, +static int _mi_cmp_buffer(File file, const uchar *buff, my_off_t filepos, uint length) { uint next_length; - char temp_buff[IO_SIZE*2]; + uchar temp_buff[IO_SIZE*2]; DBUG_ENTER("_mi_cmp_buffer"); next_length= IO_SIZE*2 - (uint) (filepos & (IO_SIZE-1)); @@ 
-1563,7 +1572,7 @@ static int _mi_cmp_buffer(File file, const byte *buff, my_off_t filepos, while (length > IO_SIZE*2) { if (my_pread(file,temp_buff,next_length,filepos, MYF(MY_NABP)) || - memcmp((byte*) buff,temp_buff,next_length)) + memcmp(buff, temp_buff, next_length)) goto err; filepos+=next_length; buff+=next_length; @@ -1572,7 +1581,7 @@ static int _mi_cmp_buffer(File file, const byte *buff, my_off_t filepos, } if (my_pread(file,temp_buff,length,filepos,MYF(MY_NABP))) goto err; - DBUG_RETURN(memcmp((byte*) buff,temp_buff,length)); + DBUG_RETURN(memcmp(buff,temp_buff,length)); err: DBUG_RETURN(1); } @@ -1612,13 +1621,13 @@ err: != 0 Error */ -int _mi_read_rnd_dynamic_record(MI_INFO *info, byte *buf, +int _mi_read_rnd_dynamic_record(MI_INFO *info, uchar *buf, register my_off_t filepos, my_bool skip_deleted_blocks) { int block_of_record, info_read, save_errno; uint left_len,b_type; - byte *to; + uchar *to; MI_BLOCK_INFO block_info; MYISAM_SHARE *share=info->s; DBUG_ENTER("_mi_read_rnd_dynamic_record"); @@ -1664,7 +1673,7 @@ int _mi_read_rnd_dynamic_record(MI_INFO *info, byte *buf, } if (info->opt_flag & READ_CACHE_USED) { - if (_mi_read_cache(&info->rec_cache,(byte*) block_info.header,filepos, + if (_mi_read_cache(&info->rec_cache,(uchar*) block_info.header,filepos, sizeof(block_info.header), (!block_of_record && skip_deleted_blocks ? READING_NEXT : 0) | READING_HEADER)) @@ -1727,7 +1736,7 @@ int _mi_read_rnd_dynamic_record(MI_INFO *info, byte *buf, tmp_length= block_info.data_len; if (tmp_length) { - memcpy((byte*) to, block_info.header+offset,tmp_length); + memcpy((uchar*) to, block_info.header+offset,tmp_length); block_info.data_len-=tmp_length; left_len-=tmp_length; to+=tmp_length; @@ -1739,7 +1748,7 @@ int _mi_read_rnd_dynamic_record(MI_INFO *info, byte *buf, { if (info->opt_flag & READ_CACHE_USED) { - if (_mi_read_cache(&info->rec_cache,(byte*) to,filepos, + if (_mi_read_cache(&info->rec_cache,(uchar*) to,filepos, block_info.data_len, (!block_of_record && skip_deleted_blocks) ? READING_NEXT : 0)) @@ -1753,7 +1762,7 @@ int _mi_read_rnd_dynamic_record(MI_INFO *info, byte *buf, flush_io_cache(&info->rec_cache)) goto err; /* VOID(my_seek(info->dfile,filepos,MY_SEEK_SET,MYF(0))); */ - if (my_read(info->dfile,(byte*) to,block_info.data_len,MYF(MY_NABP))) + if (my_read(info->dfile,(uchar*) to,block_info.data_len,MYF(MY_NABP))) { if (my_errno == -1) my_errno= HA_ERR_WRONG_IN_RECORD; /* Unexpected end of file */ @@ -1806,11 +1815,11 @@ uint _mi_get_block_info(MI_BLOCK_INFO *info, File file, my_off_t filepos) my_pread() may leave the file pointer untouched. */ VOID(my_seek(file,filepos,MY_SEEK_SET,MYF(0))); - if (my_read(file,(char*) header,sizeof(info->header),MYF(0)) != + if (my_read(file, header, sizeof(info->header),MYF(0)) != sizeof(info->header)) goto err; } - DBUG_DUMP("header",(byte*) header,MI_BLOCK_INFO_HEADER_LENGTH); + DBUG_DUMP("header",header,MI_BLOCK_INFO_HEADER_LENGTH); if (info->second_read) { if (info->header[0] <= 6 || info->header[0] == 13) diff --git a/storage/myisam/mi_extra.c b/storage/myisam/mi_extra.c index e1288fa6624..1b4c79d13de 100644 --- a/storage/myisam/mi_extra.c +++ b/storage/myisam/mi_extra.c @@ -78,7 +78,7 @@ int mi_extra(MI_INFO *info, enum ha_extra_function function, void *extra_arg) if (_mi_memmap_file(info)) { /* We don't nead MADV_SEQUENTIAL if small file */ - madvise(share->file_map,share->state.state.data_file_length, + madvise((char*) share->file_map, share->state.state.data_file_length, share->state.state.data_file_length <= RECORD_CACHE_SIZE*16 ? 
MADV_RANDOM : MADV_SEQUENTIAL); pthread_mutex_unlock(&share->intern_lock); @@ -158,7 +158,8 @@ int mi_extra(MI_INFO *info, enum ha_extra_function function, void *extra_arg) } #if defined(HAVE_MMAP) && defined(HAVE_MADVISE) if (info->opt_flag & MEMMAP_USED) - madvise(share->file_map,share->state.state.data_file_length,MADV_RANDOM); + madvise((char*) share->file_map, share->state.state.data_file_length, + MADV_RANDOM); #endif break; case HA_EXTRA_FLUSH_CACHE: @@ -180,8 +181,8 @@ int mi_extra(MI_INFO *info, enum ha_extra_function function, void *extra_arg) case HA_EXTRA_KEYREAD: /* Read only keys to record */ case HA_EXTRA_REMEMBER_POS: info->opt_flag |= REMEMBER_OLD_POS; - bmove((byte*) info->lastkey+share->base.max_key_length*2, - (byte*) info->lastkey,info->lastkey_length); + bmove((uchar*) info->lastkey+share->base.max_key_length*2, + (uchar*) info->lastkey,info->lastkey_length); info->save_update= info->update; info->save_lastinx= info->lastinx; info->save_lastpos= info->lastpos; @@ -197,8 +198,8 @@ int mi_extra(MI_INFO *info, enum ha_extra_function function, void *extra_arg) case HA_EXTRA_RESTORE_POS: if (info->opt_flag & REMEMBER_OLD_POS) { - bmove((byte*) info->lastkey, - (byte*) info->lastkey+share->base.max_key_length*2, + bmove((uchar*) info->lastkey, + (uchar*) info->lastkey+share->base.max_key_length*2, info->save_lastkey_length); info->update= info->save_update | HA_STATE_WRITTEN; info->lastinx= info->save_lastinx; @@ -258,7 +259,7 @@ int mi_extra(MI_INFO *info, enum ha_extra_function function, void *extra_arg) case HA_EXTRA_PREPARE_FOR_DELETE: pthread_mutex_lock(&THR_LOCK_myisam); share->last_version= 0L; /* Impossible version */ -#ifdef __WIN__ +#ifdef __WIN__REMOVE_OBSOLETE_WORKAROUND /* Close the isam and data files as Win32 can't drop an open table */ pthread_mutex_lock(&share->intern_lock); if (flush_key_blocks(share->key_cache, share->kfile, @@ -349,6 +350,13 @@ int mi_extra(MI_INFO *info, enum ha_extra_function function, void *extra_arg) case HA_EXTRA_MMAP: #ifdef HAVE_MMAP pthread_mutex_lock(&share->intern_lock); + /* + Memory map the data file if it is not already mapped. It is safe + to memory map a file while other threads are using file I/O on it. + Assigning a new address to a function pointer is an atomic + operation. intern_lock prevents two or more mappings from being + done at the same time. + */ if (!share->file_map) { if (mi_dynmap_file(info, share->state.state.data_file_length)) @@ -378,7 +386,7 @@ int mi_extra(MI_INFO *info, enum ha_extra_function function, void *extra_arg) { char tmp[1]; tmp[0]=function; - myisam_log_command(MI_LOG_EXTRA,info,(byte*) tmp,1,error); + myisam_log_command(MI_LOG_EXTRA,info,(uchar*) tmp,1,error); } DBUG_RETURN(error); } /* mi_extra */ @@ -428,7 +436,8 @@ int mi_reset(MI_INFO *info) mi_alloc_rec_buff(info, -1, &info->rec_buff); #if defined(HAVE_MMAP) && defined(HAVE_MADVISE) if (info->opt_flag & MEMMAP_USED) - madvise(share->file_map,share->state.state.data_file_length,MADV_RANDOM); + madvise((char*) share->file_map, share->state.state.data_file_length, + MADV_RANDOM); #endif info->opt_flag&= ~(KEY_READ_USED | REMEMBER_OLD_POS); info->quick_mode=0; diff --git a/storage/myisam/mi_info.c b/storage/myisam/mi_info.c index a2abab696d1..91e7ca659e4 100644 --- a/storage/myisam/mi_info.c +++ b/storage/myisam/mi_info.c @@ -57,9 +57,9 @@ int mi_status(MI_INFO *info, register MI_ISAMINFO *x, uint flag) x->keys = share->state.header.keys; x->check_time = share->state.check_time; - x->mean_reclength = info->state->records ? 
- (ulong) ((info->state->data_file_length-info->state->empty)/ - info->state->records) : (ulong) share->min_pack_length; + x->mean_reclength= x->records ? + (ulong) ((x->data_file_length - x->delete_length) / x->records) : + (ulong) share->min_pack_length; } if (flag & HA_STATUS_ERRKEY) { diff --git a/storage/myisam/mi_key.c b/storage/myisam/mi_key.c index b203286d544..3f445ebf44d 100644 --- a/storage/myisam/mi_key.c +++ b/storage/myisam/mi_key.c @@ -31,7 +31,7 @@ set_if_smaller(char_length,length); \ } while(0) -static int _mi_put_key_in_record(MI_INFO *info,uint keynr,byte *record); +static int _mi_put_key_in_record(MI_INFO *info,uint keynr,uchar *record); /* Make a intern key from a record @@ -49,9 +49,9 @@ static int _mi_put_key_in_record(MI_INFO *info,uint keynr,byte *record); */ uint _mi_make_key(register MI_INFO *info, uint keynr, uchar *key, - const byte *record, my_off_t filepos) + const uchar *record, my_off_t filepos) { - byte *pos; + uchar *pos; uchar *start; reg1 HA_KEYSEG *keyseg; my_bool is_ft= info->s->keyinfo[keynr].flag & HA_FULLTEXT; @@ -90,7 +90,7 @@ uint _mi_make_key(register MI_INFO *info, uint keynr, uchar *key, char_length= ((!is_ft && cs && cs->mbmaxlen > 1) ? length/cs->mbmaxlen : length); - pos= (byte*) record+keyseg->start; + pos= (uchar*) record+keyseg->start; if (type == HA_KEYTYPE_BIT) { if (keyseg->bit_length) @@ -100,7 +100,7 @@ uint _mi_make_key(register MI_INFO *info, uint keynr, uchar *key, *key++= bits; length--; } - memcpy((byte*) key, pos, length); + memcpy((uchar*) key, pos, length); key+= length; continue; } @@ -108,18 +108,18 @@ uint _mi_make_key(register MI_INFO *info, uint keynr, uchar *key, { if (type != HA_KEYTYPE_NUM) { - length= cs->cset->lengthsp(cs, pos, length); + length= cs->cset->lengthsp(cs, (char*) pos, length); } else { - byte *end= pos + length; + uchar *end= pos + length; while (pos < end && pos[0] == ' ') pos++; length=(uint) (end-pos); } FIX_LENGTH(cs, pos, length, char_length); store_key_length_inc(key,char_length); - memcpy((byte*) key,(byte*) pos,(size_t) char_length); + memcpy((uchar*) key,(uchar*) pos,(size_t) char_length); key+=char_length; continue; } @@ -132,18 +132,18 @@ uint _mi_make_key(register MI_INFO *info, uint keynr, uchar *key, set_if_smaller(length,tmp_length); FIX_LENGTH(cs, pos, length, char_length); store_key_length_inc(key,char_length); - memcpy((byte*) key,(byte*) pos,(size_t) char_length); + memcpy((uchar*) key,(uchar*) pos,(size_t) char_length); key+= char_length; continue; } else if (keyseg->flag & HA_BLOB_PART) { uint tmp_length=_mi_calc_blob_length(keyseg->bit_start,pos); - memcpy_fixed((byte*) &pos,pos+keyseg->bit_start,sizeof(char*)); + memcpy_fixed((uchar*) &pos,pos+keyseg->bit_start,sizeof(char*)); set_if_smaller(length,tmp_length); FIX_LENGTH(cs, pos, length, char_length); store_key_length_inc(key,char_length); - memcpy((byte*) key,(byte*) pos,(size_t) char_length); + memcpy((uchar*) key,(uchar*) pos,(size_t) char_length); key+= char_length; continue; } @@ -182,14 +182,14 @@ uint _mi_make_key(register MI_INFO *info, uint keynr, uchar *key, continue; } FIX_LENGTH(cs, pos, length, char_length); - memcpy((byte*) key, pos, char_length); + memcpy((uchar*) key, pos, char_length); if (length > char_length) cs->cset->fill(cs, (char*) key+char_length, length-char_length, ' '); key+= length; } _mi_dpointer(info,key,filepos); DBUG_PRINT("exit",("keynr: %d",keynr)); - DBUG_DUMP("key",(byte*) start,(uint) (key-start)+keyseg->length); + DBUG_DUMP("key",(uchar*) start,(uint) (key-start)+keyseg->length); 
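/*
  Editor's aside, not part of the patch: the HA_SPACE_PACK branches of
  _mi_make_key() above strip trailing (or, for HA_KEYTYPE_NUM, leading)
  blanks and store the remaining bytes behind a length prefix. A minimal
  self-contained sketch of the same idea, using a plain one-byte prefix
  instead of store_key_length_inc(); pack_endspace_segment() is a
  hypothetical name, not a MyISAM function.
*/
#include <string.h>

static unsigned pack_endspace_segment(unsigned char *key,
                                      const unsigned char *pos,
                                      unsigned length)
{
  const unsigned char *end= pos + length;
  while (end > pos && end[-1] == ' ')     /* drop trailing blanks */
    end--;
  length= (unsigned) (end - pos);
  *key++= (unsigned char) length;         /* length prefix (1 byte here) */
  memcpy(key, pos, length);               /* packed segment bytes */
  return length + 1;                      /* bytes written to 'key' */
}
/*
  Note also the assertion added to _mi_pack_key() below:
  ((keypart_map + 1) & keypart_map) == 0 holds exactly when keypart_map
  has the form 2^n - 1, i.e. only contiguous key prefixes are accepted.
*/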
DBUG_EXECUTE("key", _mi_print_key(DBUG_FILE,info->s->keyinfo[keynr].seg,start, (uint) (key-start));); @@ -206,7 +206,7 @@ uint _mi_make_key(register MI_INFO *info, uint keynr, uchar *key, uint keynr key number key Store packed key here old Not packed key - k_length Length of 'old' to use + keypart_map bitmap of used keyparts last_used_keyseg out parameter. May be NULL RETURN @@ -216,34 +216,36 @@ uint _mi_make_key(register MI_INFO *info, uint keynr, uchar *key, */ uint _mi_pack_key(register MI_INFO *info, uint keynr, uchar *key, uchar *old, - uint k_length, HA_KEYSEG **last_used_keyseg) + key_part_map keypart_map, HA_KEYSEG **last_used_keyseg) { uchar *start_key=key; HA_KEYSEG *keyseg; my_bool is_ft= info->s->keyinfo[keynr].flag & HA_FULLTEXT; DBUG_ENTER("_mi_pack_key"); - for (keyseg=info->s->keyinfo[keynr].seg ; - keyseg->type && (int) k_length > 0; - old+=keyseg->length, keyseg++) + /* "one part" rtree key is 2*SPDIMS part key in MyISAM */ + if (info->s->keyinfo[keynr].key_alg == HA_KEY_ALG_RTREE) + keypart_map= (((key_part_map)1) << (2*SPDIMS)) - 1; + + /* only key prefixes are supported */ + DBUG_ASSERT(((keypart_map+1) & keypart_map) == 0); + + for (keyseg= info->s->keyinfo[keynr].seg ; keyseg->type && keypart_map; + old+= keyseg->length, keyseg++) { - enum ha_base_keytype type=(enum ha_base_keytype) keyseg->type; - uint length=min((uint) keyseg->length,(uint) k_length); + enum ha_base_keytype type= (enum ha_base_keytype) keyseg->type; + uint length= keyseg->length; uint char_length; uchar *pos; CHARSET_INFO *cs=keyseg->charset; + keypart_map>>= 1; if (keyseg->null_bit) { - k_length--; if (!(*key++= (char) 1-*old++)) /* Copy null marker */ { - k_length-=length; if (keyseg->flag & (HA_VAR_LENGTH_PART | HA_BLOB_PART)) - { - k_length-=2; /* Skip length */ old+= 2; - } continue; /* Found NULL */ } } @@ -252,21 +254,20 @@ uint _mi_pack_key(register MI_INFO *info, uint keynr, uchar *key, uchar *old, if (keyseg->flag & HA_SPACE_PACK) { uchar *end=pos+length; - if (type != HA_KEYTYPE_NUM) - { - while (end > pos && end[-1] == ' ') - end--; - } - else + if (type == HA_KEYTYPE_NUM) { while (pos < end && pos[0] == ' ') pos++; } - k_length-=length; + else if (type != HA_KEYTYPE_BINARY) + { + while (end > pos && end[-1] == ' ') + end--; + } length=(uint) (end-pos); FIX_LENGTH(cs, pos, length, char_length); store_key_length_inc(key,char_length); - memcpy((byte*) key,pos,(size_t) char_length); + memcpy((uchar*) key,pos,(size_t) char_length); key+= char_length; continue; } @@ -274,55 +275,31 @@ uint _mi_pack_key(register MI_INFO *info, uint keynr, uchar *key, uchar *old, { /* Length of key-part used with mi_rkey() always 2 */ uint tmp_length=uint2korr(pos); - k_length-= 2+length; pos+=2; set_if_smaller(length,tmp_length); /* Safety */ FIX_LENGTH(cs, pos, length, char_length); store_key_length_inc(key,char_length); old+=2; /* Skip length */ - memcpy((byte*) key, pos,(size_t) char_length); + memcpy((uchar*) key, pos,(size_t) char_length); key+= char_length; continue; } else if (keyseg->flag & HA_SWAP_KEY) { /* Numerical column */ pos+=length; - k_length-=length; while (length--) - { *key++ = *--pos; - } continue; } FIX_LENGTH(cs, pos, length, char_length); - memcpy((byte*) key, pos, char_length); + memcpy((uchar*) key, pos, char_length); if (length > char_length) cs->cset->fill(cs, (char*) key+char_length, length-char_length, ' '); key+= length; - k_length-=length; } if (last_used_keyseg) *last_used_keyseg= keyseg; -#ifdef NOT_USED - if (keyseg->type) - { - /* Part-key ; fill with ASCII 0 for easier 
searching */ - length= (uint) -k_length; /* unused part of last key */ - do - { - if (keyseg->flag & HA_NULL_PART) - length++; - if (keyseg->flag & HA_SPACE_PACK) - length+=2; - else - length+= keyseg->length; - keyseg++; - } while (keyseg->type); - bzero((byte*) key,length); - key+=length; - } -#endif DBUG_RETURN((uint) (key-start_key)); } /* _mi_pack_key */ @@ -348,16 +325,16 @@ uint _mi_pack_key(register MI_INFO *info, uint keynr, uchar *key, uchar *old, */ static int _mi_put_key_in_record(register MI_INFO *info, uint keynr, - byte *record) + uchar *record) { - reg2 byte *key; - byte *pos,*key_end; + reg2 uchar *key; + uchar *pos,*key_end; reg1 HA_KEYSEG *keyseg; - byte *blob_ptr; + uchar *blob_ptr; DBUG_ENTER("_mi_put_key_in_record"); - blob_ptr= (byte*) info->lastkey2; /* Place to put blob parts */ - key=(byte*) info->lastkey; /* KEy that was read */ + blob_ptr= (uchar*) info->lastkey2; /* Place to put blob parts */ + key=(uchar*) info->lastkey; /* KEy that was read */ key_end=key+info->lastkey_length; for (keyseg=info->s->keyinfo[keynr].seg ; keyseg->type ;keyseg++) { @@ -386,7 +363,7 @@ static int _mi_put_key_in_record(register MI_INFO *info, uint keynr, clr_rec_bits(record + keyseg->bit_pos, keyseg->bit_start, keyseg->bit_length); } - memcpy(record + keyseg->start, (byte*) key, length); + memcpy(record + keyseg->start, (uchar*) key, length); key+= length; continue; } @@ -403,7 +380,8 @@ static int _mi_put_key_in_record(register MI_INFO *info, uint keynr, { memcpy(pos,key,(size_t) length); keyseg->charset->cset->fill(keyseg->charset, - pos + length, keyseg->length - length, + (char*) pos + length, + keyseg->length - length, ' '); } else @@ -429,7 +407,7 @@ static int _mi_put_key_in_record(register MI_INFO *info, uint keynr, else int2store(record+keyseg->start, length); /* And key data */ - memcpy(record+keyseg->start + keyseg->bit_start, (byte*) key, length); + memcpy(record+keyseg->start + keyseg->bit_start, (uchar*) key, length); key+= length; } else if (keyseg->flag & HA_BLOB_PART) @@ -454,8 +432,8 @@ static int _mi_put_key_in_record(register MI_INFO *info, uint keynr, } else if (keyseg->flag & HA_SWAP_KEY) { - byte *to= record+keyseg->start+keyseg->length; - byte *end= key+keyseg->length; + uchar *to= record+keyseg->start+keyseg->length; + uchar *end= key+keyseg->length; #ifdef CHECK_KEYS if (end > key_end) goto err; @@ -472,7 +450,7 @@ static int _mi_put_key_in_record(register MI_INFO *info, uint keynr, if (key+keyseg->length > key_end) goto err; #endif - memcpy(record+keyseg->start,(byte*) key, + memcpy(record+keyseg->start,(uchar*) key, (size_t) keyseg->length); key+= keyseg->length; } @@ -486,7 +464,7 @@ err: /* Here when key reads are used */ -int _mi_read_key_record(MI_INFO *info, my_off_t filepos, byte *buf) +int _mi_read_key_record(MI_INFO *info, my_off_t filepos, uchar *buf) { fast_mi_writeinfo(info); if (filepos != HA_OFFSET_ERROR) @@ -521,7 +499,7 @@ int _mi_read_key_record(MI_INFO *info, my_off_t filepos, byte *buf) less than zero. 
*/ -ulonglong retrieve_auto_increment(MI_INFO *info,const byte *record) +ulonglong retrieve_auto_increment(MI_INFO *info,const uchar *record) { ulonglong value= 0; /* Store unsigned values here */ longlong s_value= 0; /* Store signed values here */ diff --git a/storage/myisam/mi_keycache.c b/storage/myisam/mi_keycache.c index 6694893e9c3..5cf3fede1ae 100644 --- a/storage/myisam/mi_keycache.c +++ b/storage/myisam/mi_keycache.c @@ -104,7 +104,8 @@ int mi_assign_to_key_cache(MI_INFO *info, share->key_cache= key_cache; /* store the key cache in the global hash structure for future opens */ - if (multi_key_cache_set(share->unique_file_name, share->unique_name_length, + if (multi_key_cache_set((uchar*) share->unique_file_name, + share->unique_name_length, share->key_cache)) error= my_errno; pthread_mutex_unlock(&share->intern_lock); diff --git a/storage/myisam/mi_locking.c b/storage/myisam/mi_locking.c index e822ea9e6da..ec359d13a14 100644 --- a/storage/myisam/mi_locking.c +++ b/storage/myisam/mi_locking.c @@ -254,7 +254,7 @@ int mi_lock_database(MI_INFO *info, int lock_type) pthread_mutex_unlock(&share->intern_lock); #if defined(FULL_LOG) || defined(_lint) lock_type|=(int) (flag << 8); /* Set bit to set if real lock */ - myisam_log_command(MI_LOG_LOCK,info,(byte*) &lock_type,sizeof(lock_type), + myisam_log_command(MI_LOG_LOCK,info,(uchar*) &lock_type,sizeof(lock_type), error); #endif DBUG_RETURN(error); @@ -520,7 +520,7 @@ int _mi_test_if_changed(register MI_INFO *info) int _mi_mark_file_changed(MI_INFO *info) { - char buff[3]; + uchar buff[3]; register MYISAM_SHARE *share=info->s; DBUG_ENTER("_mi_mark_file_changed"); @@ -553,7 +553,7 @@ int _mi_mark_file_changed(MI_INFO *info) int _mi_decrement_open_count(MI_INFO *info) { - char buff[2]; + uchar buff[2]; register MYISAM_SHARE *share=info->s; int lock_error=0,write_error=0; if (share->global_changed) diff --git a/storage/myisam/mi_log.c b/storage/myisam/mi_log.c index 2672a9dacd6..8b9ca038fec 100644 --- a/storage/myisam/mi_log.c +++ b/storage/myisam/mi_log.c @@ -31,7 +31,7 @@ #undef GETPID /* For HPUX */ #ifdef THREAD -#define GETPID() (log_type == 1 ? (long) myisam_pid : (long) my_thread_id()); +#define GETPID() (log_type == 1 ? 
(long) myisam_pid : (long) my_thread_dbug_id()) #else #define GETPID() myisam_pid #endif @@ -74,9 +74,9 @@ int mi_log(int activate_log) /* All logs starts with command(1) dfile(2) process(4) result(2) */ void _myisam_log(enum myisam_log_commands command, MI_INFO *info, - const byte *buffert, uint length) + const uchar *buffert, uint length) { - char buff[11]; + uchar buff[11]; int error,old_errno; ulong pid=(ulong) GETPID(); old_errno=my_errno; @@ -98,9 +98,9 @@ void _myisam_log(enum myisam_log_commands command, MI_INFO *info, void _myisam_log_command(enum myisam_log_commands command, MI_INFO *info, - const byte *buffert, uint length, int result) + const uchar *buffert, uint length, int result) { - char buff[9]; + uchar buff[9]; int error,old_errno; ulong pid=(ulong) GETPID(); @@ -122,9 +122,9 @@ void _myisam_log_command(enum myisam_log_commands command, MI_INFO *info, void _myisam_log_record(enum myisam_log_commands command, MI_INFO *info, - const byte *record, my_off_t filepos, int result) + const uchar *record, my_off_t filepos, int result) { - char buff[21],*pos; + uchar buff[21],*pos; int error,old_errno; uint length; ulong pid=(ulong) GETPID(); @@ -134,7 +134,7 @@ void _myisam_log_record(enum myisam_log_commands command, MI_INFO *info, length=info->s->base.reclength; else length=info->s->base.reclength+ _my_calc_total_blob_length(info,record); - buff[0]=(char) command; + buff[0]=(uchar) command; mi_int2store(buff+1,info->dfile); mi_int4store(buff+3,pid); mi_int2store(buff+7,result); @@ -142,8 +142,8 @@ void _myisam_log_record(enum myisam_log_commands command, MI_INFO *info, mi_int4store(buff+17,length); pthread_mutex_lock(&THR_LOCK_myisam); error=my_lock(myisam_log_file,F_WRLCK,0L,F_TO_EOF,MYF(MY_SEEK_NOT_DONE)); - VOID(my_write(myisam_log_file,buff,sizeof(buff),MYF(0))); - VOID(my_write(myisam_log_file,(byte*) record,info->s->base.reclength,MYF(0))); + VOID(my_write(myisam_log_file, buff,sizeof(buff),MYF(0))); + VOID(my_write(myisam_log_file, record,info->s->base.reclength,MYF(0))); if (info->s->base.blobs) { MI_BLOB *blob,*end; @@ -152,7 +152,8 @@ void _myisam_log_record(enum myisam_log_commands command, MI_INFO *info, blob != end ; blob++) { - memcpy_fixed(&pos,record+blob->offset+blob->pack_length,sizeof(char*)); + memcpy_fixed((uchar*) &pos, record+blob->offset+blob->pack_length, + sizeof(char*)); VOID(my_write(myisam_log_file,pos,blob->length,MYF(0))); } } diff --git a/storage/myisam/mi_open.c b/storage/myisam/mi_open.c index afed5d05963..b848c822f75 100644 --- a/storage/myisam/mi_open.c +++ b/storage/myisam/mi_open.c @@ -79,7 +79,7 @@ MI_INFO *mi_open(const char *name, int mode, uint open_flags) key_parts,unique_key_parts,fulltext_keys,uniques; char name_buff[FN_REFLEN], org_name[FN_REFLEN], index_name[FN_REFLEN], data_name[FN_REFLEN]; - char *disk_cache, *disk_pos, *end_pos; + uchar *disk_cache, *disk_pos, *end_pos; MI_INFO info,*m_info,*old_info; MYISAM_SHARE share_buff,*share; ulong rec_per_key_part[MI_MAX_POSSIBLE_KEY*MI_MAX_KEY_SEG]; @@ -92,7 +92,7 @@ MI_INFO *mi_open(const char *name, int mode, uint open_flags) lock_error=1; errpos=0; head_length=sizeof(share_buff.state.header); - bzero((byte*) &info,sizeof(info)); + bzero((uchar*) &info,sizeof(info)); my_realpath(name_buff, fn_format(org_name,name,"",MI_NAME_IEXT, MY_UNPACK_FILENAME),MYF(0)); @@ -100,11 +100,12 @@ MI_INFO *mi_open(const char *name, int mode, uint open_flags) if (!(old_info=test_if_reopen(name_buff))) { share= &share_buff; - bzero((gptr) &share_buff,sizeof(share_buff)); + bzero((uchar*) 
&share_buff,sizeof(share_buff)); share_buff.state.rec_per_key_part=rec_per_key_part; share_buff.state.key_root=key_root; share_buff.state.key_del=key_del; - share_buff.key_cache= multi_key_cache_search(name_buff, strlen(name_buff)); + share_buff.key_cache= multi_key_cache_search((uchar*) name_buff, + strlen(name_buff)); DBUG_EXECUTE_IF("myisam_pretend_crashed_table_on_open", if (strstr(name, "/t1")) @@ -121,14 +122,14 @@ MI_INFO *mi_open(const char *name, int mode, uint open_flags) } share->mode=open_mode; errpos=1; - if (my_read(kfile,(char*) share->state.header.file_version,head_length, + if (my_read(kfile, share->state.header.file_version, head_length, MYF(MY_NABP))) { my_errno= HA_ERR_NOT_A_TABLE; goto err; } - if (memcmp((byte*) share->state.header.file_version, - (byte*) myisam_file_magic, 4)) + if (memcmp((uchar*) share->state.header.file_version, + (uchar*) myisam_file_magic, 4)) { DBUG_PRINT("error",("Wrong header in %s",name_buff)); DBUG_DUMP("error_dump",(char*) share->state.header.file_version, @@ -165,7 +166,7 @@ MI_INFO *mi_open(const char *name, int mode, uint open_flags) info_length=mi_uint2korr(share->state.header.header_length); base_pos=mi_uint2korr(share->state.header.base_pos); - if (!(disk_cache=(char*) my_alloca(info_length+128))) + if (!(disk_cache= (uchar*) my_alloca(info_length+128))) { my_errno=ENOMEM; goto err; @@ -202,15 +203,14 @@ MI_INFO *mi_open(const char *name, int mode, uint open_flags) } share->state_diff_length=len-MI_STATE_INFO_SIZE; - mi_state_info_read((uchar*) disk_cache, &share->state); + mi_state_info_read(disk_cache, &share->state); len= mi_uint2korr(share->state.header.base_info_length); if (len != MI_BASE_INFO_SIZE) { DBUG_PRINT("warning",("saved_base_info_length: %d base_info_length: %d", len,MI_BASE_INFO_SIZE)); } - disk_pos= (char*) - my_n_base_info_read((uchar*) disk_cache + base_pos, &share->base); + disk_pos= my_n_base_info_read(disk_cache + base_pos, &share->base); share->state.state_length=base_pos; if (!(open_flags & HA_OPEN_FOR_REPAIR) && @@ -236,7 +236,7 @@ MI_INFO *mi_open(const char *name, int mode, uint open_flags) key_parts+=fulltext_keys*FT_SEGS; if (share->base.max_key_length > MI_MAX_KEY_BUFF || keys > MI_MAX_KEY || - key_parts >= MI_MAX_KEY * MI_MAX_KEY_SEG) + key_parts > MI_MAX_KEY * MI_MAX_KEY_SEG) { DBUG_PRINT("error",("Wrong key info: Max_key_length: %d keys: %d key_parts: %d", share->base.max_key_length, keys, key_parts)); my_errno=HA_ERR_UNSUPPORTED; @@ -376,11 +376,11 @@ MI_INFO *mi_open(const char *name, int mode, uint open_flags) } else { - uint j; + uint k; share->keyinfo[i].seg=pos; - for (j=0; j < FT_SEGS; j++) + for (k=0; k < FT_SEGS; k++) { - *pos=ft_keysegs[j]; + *pos= ft_keysegs[k]; pos[0].language= pos[-1].language; if (!(pos[0].charset= pos[-1].charset)) { @@ -504,7 +504,7 @@ MI_INFO *mi_open(const char *name, int mode, uint open_flags) } else if (share->options & HA_OPTION_PACK_RECORD) share->data_file_type = DYNAMIC_RECORD; - my_afree((gptr) disk_cache); + my_afree(disk_cache); mi_setup_functions(share); share->is_log_table= FALSE; #ifdef THREAD @@ -536,6 +536,14 @@ MI_INFO *mi_open(const char *name, int mode, uint open_flags) } } #endif + /* + Memory mapping can only be requested after initializing intern_lock. 
+ */ + if (open_flags & HA_OPEN_MMAP) + { + info.s= share; + mi_extra(&info, HA_EXTRA_MMAP, 0); + } } else { @@ -634,7 +642,7 @@ MI_INFO *mi_open(const char *name, int mode, uint open_flags) if (myisam_log_file >= 0) { intern_filename(name_buff,share->index_file_name); - _myisam_log(MI_LOG_OPEN,m_info,name_buff,(uint) strlen(name_buff)); + _myisam_log(MI_LOG_OPEN, m_info, (uchar*) name_buff, strlen(name_buff)); } DBUG_RETURN(m_info); @@ -646,7 +654,7 @@ err: mi_report_error(save_errno, name); switch (errpos) { case 6: - my_free((gptr) m_info,MYF(0)); + my_free((uchar*) m_info,MYF(0)); /* fall through */ case 5: VOID(my_close(info.dfile,MYF(0))); @@ -654,14 +662,14 @@ err: break; /* Don't remove open table */ /* fall through */ case 4: - my_free((gptr) share,MYF(0)); + my_free((uchar*) share,MYF(0)); /* fall through */ case 3: if (! lock_error) VOID(my_lock(kfile, F_UNLCK, 0L, F_TO_EOF, MYF(MY_SEEK_NOT_DONE))); /* fall through */ case 2: - my_afree((gptr) disk_cache); + my_afree(disk_cache); /* fall through */ case 1: VOID(my_close(kfile,MYF(0))); @@ -676,7 +684,7 @@ err: } /* mi_open */ -byte *mi_alloc_rec_buff(MI_INFO *info, ulong length, byte **buf) +uchar *mi_alloc_rec_buff(MI_INFO *info, ulong length, uchar **buf) { uint extra; uint32 old_length; @@ -684,7 +692,7 @@ byte *mi_alloc_rec_buff(MI_INFO *info, ulong length, byte **buf) if (! *buf || length > (old_length=mi_get_rec_buff_len(info, *buf))) { - byte *newptr = *buf; + uchar *newptr = *buf; /* to simplify initial init of info->rec_buf in mi_open and mi_extra */ if (length == (ulong) -1) @@ -701,7 +709,7 @@ byte *mi_alloc_rec_buff(MI_INFO *info, ulong length, byte **buf) MI_REC_BUFF_OFFSET : 0); if (extra && newptr) newptr-= MI_REC_BUFF_OFFSET; - if (!(newptr=(byte*) my_realloc((gptr)newptr, length+extra+8, + if (!(newptr=(uchar*) my_realloc((uchar*)newptr, length+extra+8, MYF(MY_ALLOW_ZERO_PTR)))) return newptr; *((uint32 *) newptr)= (uint32) length; @@ -891,10 +899,10 @@ uint mi_state_info_write(File file, MI_STATE_INFO *state, uint pWrite) } if (pWrite & 1) - DBUG_RETURN(my_pwrite(file,(char*) buff, (uint) (ptr-buff), 0L, - MYF(MY_NABP | MY_THREADSAFE))); - DBUG_RETURN(my_write(file, (char*) buff, (uint) (ptr-buff), - MYF(MY_NABP))); + DBUG_RETURN(my_pwrite(file, buff, (size_t) (ptr-buff), 0L, + MYF(MY_NABP | MY_THREADSAFE)) != 0); + DBUG_RETURN(my_write(file, buff, (size_t) (ptr-buff), + MYF(MY_NABP)) != 0); } @@ -953,18 +961,18 @@ uchar *mi_state_info_read(uchar *ptr, MI_STATE_INFO *state) uint mi_state_info_read_dsk(File file, MI_STATE_INFO *state, my_bool pRead) { - char buff[MI_STATE_INFO_SIZE + MI_STATE_EXTRA_SIZE]; + uchar buff[MI_STATE_INFO_SIZE + MI_STATE_EXTRA_SIZE]; if (!myisam_single_user) { if (pRead) { if (my_pread(file, buff, state->state_length,0L, MYF(MY_NABP))) - return (MY_FILE_ERROR); + return 1; } else if (my_read(file, buff, state->state_length,MYF(MY_NABP))) - return (MY_FILE_ERROR); - mi_state_info_read((uchar*) buff, state); + return 1; + mi_state_info_read(buff, state); } return 0; } @@ -1005,7 +1013,7 @@ uint mi_base_info_write(File file, MI_BASE_INFO *base) mi_int2store(ptr,base->raid_chunks); ptr +=2; mi_int4store(ptr,base->raid_chunksize); ptr +=4; bzero(ptr,6); ptr +=6; /* extra */ - return my_write(file,(char*) buff, (uint) (ptr-buff), MYF(MY_NABP)); + return my_write(file, buff, (size_t) (ptr-buff), MYF(MY_NABP)) != 0; } @@ -1065,10 +1073,10 @@ uint mi_keydef_write(File file, MI_KEYDEF *keydef) mi_int2store(ptr,keydef->keylength); ptr +=2; mi_int2store(ptr,keydef->minlength); ptr +=2; 
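/*
  Editor's aside, not part of the patch: mi_keydef_write() serializes the
  key definition through mi_int2store() into a byte buffer before a
  single my_write(). The mi_*store/mi_*korr macros use a fixed byte
  order, which keeps index files portable across architectures. A
  stand-alone sketch of such a pair (assuming the high-byte-first layout
  of the mi_*store macros; the sketch_* names are illustrative):
*/
static void sketch_int2store(unsigned char *pos, unsigned val)
{
  pos[0]= (unsigned char) (val >> 8);     /* high byte first */
  pos[1]= (unsigned char) val;
}

static unsigned sketch_uint2korr(const unsigned char *pos)
{
  return ((unsigned) pos[0] << 8) | (unsigned) pos[1];
}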
mi_int2store(ptr,keydef->maxlength); ptr +=2; - return my_write(file,(char*) buff, (uint) (ptr-buff), MYF(MY_NABP)); + return my_write(file, buff, (size_t) (ptr-buff), MYF(MY_NABP)) != 0; } -char *mi_keydef_read(char *ptr, MI_KEYDEF *keydef) +uchar *mi_keydef_read(uchar *ptr, MI_KEYDEF *keydef) { keydef->keysegs = (uint) *ptr++; keydef->key_alg = *ptr++; /* Rtree or Btree */ @@ -1109,11 +1117,11 @@ int mi_keyseg_write(File file, const HA_KEYSEG *keyseg) mi_int4store(ptr, pos); ptr+=4; - return my_write(file,(char*) buff, (uint) (ptr-buff), MYF(MY_NABP)); + return my_write(file, buff, (size_t) (ptr-buff), MYF(MY_NABP)) != 0; } -char *mi_keyseg_read(char *ptr, HA_KEYSEG *keyseg) +uchar *mi_keyseg_read(uchar *ptr, HA_KEYSEG *keyseg) { keyseg->type = *ptr++; keyseg->language = *ptr++; @@ -1149,10 +1157,10 @@ uint mi_uniquedef_write(File file, MI_UNIQUEDEF *def) *ptr++= (uchar) def->key; *ptr++ = (uchar) def->null_are_equal; - return my_write(file,(char*) buff, (uint) (ptr-buff), MYF(MY_NABP)); + return my_write(file, buff, (size_t) (ptr-buff), MYF(MY_NABP)) != 0; } -char *mi_uniquedef_read(char *ptr, MI_UNIQUEDEF *def) +uchar *mi_uniquedef_read(uchar *ptr, MI_UNIQUEDEF *def) { def->keysegs = mi_uint2korr(ptr); def->key = ptr[2]; @@ -1173,10 +1181,10 @@ uint mi_recinfo_write(File file, MI_COLUMNDEF *recinfo) mi_int2store(ptr,recinfo->length); ptr +=2; *ptr++ = recinfo->null_bit; mi_int2store(ptr,recinfo->null_pos); ptr+= 2; - return my_write(file,(char*) buff, (uint) (ptr-buff), MYF(MY_NABP)); + return my_write(file, buff, (size_t) (ptr-buff), MYF(MY_NABP)) != 0; } -char *mi_recinfo_read(char *ptr, MI_COLUMNDEF *recinfo) +uchar *mi_recinfo_read(uchar *ptr, MI_COLUMNDEF *recinfo) { recinfo->type= mi_sint2korr(ptr); ptr +=2; recinfo->length=mi_uint2korr(ptr); ptr +=2; @@ -1194,7 +1202,8 @@ The argument file_to_dup is here for the future if there would on some OS exist a dup()-like call that would give us two different file descriptors. 
*************************************************************************/ -int mi_open_datafile(MI_INFO *info, MYISAM_SHARE *share, File file_to_dup __attribute__((unused))) +int mi_open_datafile(MI_INFO *info, MYISAM_SHARE *share, + File file_to_dup __attribute__((unused))) { #ifdef USE_RAID if (share->base.raid_type) @@ -1217,7 +1226,7 @@ int mi_open_datafile(MI_INFO *info, MYISAM_SHARE *share, File file_to_dup __attr int mi_open_keyfile(MYISAM_SHARE *share) { if ((share->kfile=my_open(share->unique_file_name, share->mode | O_SHARE, - MYF(MY_WME))) < 0) + MYF(MY_WME))) < 0) return 1; return 0; } diff --git a/storage/myisam/mi_packrec.c b/storage/myisam/mi_packrec.c index 30c95dcb0bd..305b7e5532c 100644 --- a/storage/myisam/mi_packrec.c +++ b/storage/myisam/mi_packrec.c @@ -48,7 +48,7 @@ #define OFFSET_TABLE_SIZE 512 static uint read_huff_table(MI_BIT_BUFF *bit_buff,MI_DECODE_TREE *decode_tree, - uint16 **decode_table,byte **intervall_buff, + uint16 **decode_table,uchar **intervall_buff, uint16 *tmp_buff); static void make_quick_table(uint16 *to_table,uint16 *decode_table, uint *next_free,uint value,uint bits, @@ -107,7 +107,7 @@ static void fill_buffer(MI_BIT_BUFF *bit_buff); static uint max_bit(uint value); #ifdef HAVE_MMAP static uchar *_mi_mempack_get_block_info(MI_INFO *myisam, MI_BIT_BUFF *bit_buff, - MI_BLOCK_INFO *info, byte **rec_buff_p, + MI_BLOCK_INFO *info, uchar **rec_buff_p, uchar *header); #endif @@ -136,7 +136,8 @@ my_bool _mi_read_pack_info(MI_INFO *info, pbool fix_keys) uint i,trees,huff_tree_bits,rec_reflength,length; uint16 *decode_table,*tmp_buff; ulong elements,intervall_length; - char *disk_cache,*intervall_buff; + uchar *disk_cache; + uchar *intervall_buff; uchar header[HEAD_LENGTH]; MYISAM_SHARE *share=info->s; MI_BIT_BUFF bit_buff; @@ -149,14 +150,14 @@ my_bool _mi_read_pack_info(MI_INFO *info, pbool fix_keys) file=info->dfile; my_errno=0; - if (my_read(file,(byte*) header,sizeof(header),MYF(MY_NABP))) + if (my_read(file,(uchar*) header,sizeof(header),MYF(MY_NABP))) { if (!my_errno) my_errno=HA_ERR_END_OF_FILE; goto err0; } /* Only the first three bytes of magic number are independent of version. */ - if (memcmp((byte*) header, (byte*) myisam_pack_file_magic, 3)) + if (memcmp((uchar*) header, (uchar*) myisam_pack_file_magic, 3)) { my_errno=HA_ERR_WRONG_IN_RECORD; goto err0; @@ -195,10 +196,10 @@ my_bool _mi_read_pack_info(MI_INFO *info, pbool fix_keys) */ if (!(share->decode_trees=(MI_DECODE_TREE*) my_malloc((uint) (trees*sizeof(MI_DECODE_TREE)+ - intervall_length*sizeof(byte)), + intervall_length*sizeof(uchar)), MYF(MY_WME)))) goto err0; - intervall_buff=(byte*) (share->decode_trees+trees); + intervall_buff=(uchar*) (share->decode_trees+trees); /* Memory segment #2: @@ -215,7 +216,7 @@ my_bool _mi_read_pack_info(MI_INFO *info, pbool fix_keys) MYF(MY_WME | MY_ZEROFILL)))) goto err1; tmp_buff=share->decode_tables+length; - disk_cache=(byte*) (tmp_buff+OFFSET_TABLE_SIZE); + disk_cache= (uchar*) (tmp_buff+OFFSET_TABLE_SIZE); if (my_read(file,disk_cache, (uint) (share->pack.header_length-sizeof(header)), @@ -223,7 +224,7 @@ my_bool _mi_read_pack_info(MI_INFO *info, pbool fix_keys) goto err2; huff_tree_bits=max_bit(trees ? 
trees-1 : 0); - init_bit_buffer(&bit_buff, (uchar*) disk_cache, + init_bit_buffer(&bit_buff, disk_cache, (uint) (share->pack.header_length-sizeof(header))); /* Read new info for each field */ for (i=0 ; i < share->base.fields ; i++) @@ -250,8 +251,8 @@ my_bool _mi_read_pack_info(MI_INFO *info, pbool fix_keys) goto err3; /* Reallocate the decoding tables to the used size. */ decode_table=(uint16*) - my_realloc((gptr) share->decode_tables, - (uint) ((byte*) decode_table - (byte*) share->decode_tables), + my_realloc((uchar*) share->decode_tables, + (uint) ((uchar*) decode_table - (uchar*) share->decode_tables), MYF(MY_HOLD_ON_ERROR)); /* Fix the table addresses in the tree heads. */ { @@ -291,9 +292,9 @@ my_bool _mi_read_pack_info(MI_INFO *info, pbool fix_keys) err3: my_errno=HA_ERR_WRONG_IN_RECORD; err2: - my_free((gptr) share->decode_tables,MYF(0)); + my_free((uchar*) share->decode_tables,MYF(0)); err1: - my_free((gptr) share->decode_trees,MYF(0)); + my_free((uchar*) share->decode_trees,MYF(0)); err0: DBUG_RETURN(1); } @@ -318,7 +319,7 @@ err0: */ static uint read_huff_table(MI_BIT_BUFF *bit_buff, MI_DECODE_TREE *decode_tree, - uint16 **decode_table, byte **intervall_buff, + uint16 **decode_table, uchar **intervall_buff, uint16 *tmp_buff) { uint min_chr,elements,char_bits,offset_bits,size,intervall_length,table_bits, @@ -564,7 +565,7 @@ static void fill_quick_table(uint16 *table, uint bits, uint max_bits, */ value|= (max_bits - bits) << 8 | IS_CHAR; - for (end= table + (1 << bits); table < end; table++) + for (end= table + (uint) (((uint) 1 << bits)); table < end; table++) { *table= (uint16) value; } @@ -697,7 +698,7 @@ static uint find_longest_bitstream(uint16 *table, uint16 *end) HA_ERR_WRONG_IN_RECORD or -1 on error */ -int _mi_read_pack_record(MI_INFO *info, my_off_t filepos, byte *buf) +int _mi_read_pack_record(MI_INFO *info, my_off_t filepos, uchar *buf) { MI_BLOCK_INFO block_info; File file; @@ -710,7 +711,7 @@ int _mi_read_pack_record(MI_INFO *info, my_off_t filepos, byte *buf) if (_mi_pack_get_block_info(info, &info->bit_buff, &block_info, &info->rec_buff, file, filepos)) goto err; - if (my_read(file,(byte*) info->rec_buff + block_info.offset , + if (my_read(file,(uchar*) info->rec_buff + block_info.offset , block_info.rec_len - block_info.offset, MYF(MY_NABP))) goto panic; info->update|= HA_STATE_AKTIV; @@ -725,9 +726,9 @@ err: int _mi_pack_rec_unpack(register MI_INFO *info, MI_BIT_BUFF *bit_buff, - register byte *to, byte *from, ulong reclength) + register uchar *to, uchar *from, ulong reclength) { - byte *end_field; + uchar *end_field; reg3 MI_COLUMNDEF *end; MI_COLUMNDEF *current_field; MYISAM_SHARE *share=info->s; @@ -834,7 +835,7 @@ static void uf_space_normal(MI_COLUMNDEF *rec, MI_BIT_BUFF *bit_buff, uchar *to, uchar *end) { if (get_bit(bit_buff)) - bfill((byte*) to,(end-to),' '); + bfill((uchar*) to,(end-to),' '); else decode_bytes(rec,bit_buff,to,end); } @@ -844,7 +845,7 @@ static void uf_space_endspace_selected(MI_COLUMNDEF *rec, MI_BIT_BUFF *bit_buff, { uint spaces; if (get_bit(bit_buff)) - bfill((byte*) to,(end-to),' '); + bfill((uchar*) to,(end-to),' '); else { if (get_bit(bit_buff)) @@ -856,7 +857,7 @@ static void uf_space_endspace_selected(MI_COLUMNDEF *rec, MI_BIT_BUFF *bit_buff, } if (to+spaces != end) decode_bytes(rec,bit_buff,to,end-spaces); - bfill((byte*) end-spaces,spaces,' '); + bfill((uchar*) end-spaces,spaces,' '); } else decode_bytes(rec,bit_buff,to,end); @@ -876,7 +877,7 @@ static void uf_endspace_selected(MI_COLUMNDEF *rec, MI_BIT_BUFF *bit_buff, } if 
(to+spaces != end) decode_bytes(rec,bit_buff,to,end-spaces); - bfill((byte*) end-spaces,spaces,' '); + bfill((uchar*) end-spaces,spaces,' '); } else decode_bytes(rec,bit_buff,to,end); @@ -887,7 +888,7 @@ static void uf_space_endspace(MI_COLUMNDEF *rec, MI_BIT_BUFF *bit_buff, uchar *t { uint spaces; if (get_bit(bit_buff)) - bfill((byte*) to,(end-to),' '); + bfill((uchar*) to,(end-to),' '); else { if ((spaces=get_bits(bit_buff,rec->space_length_bits))+to > end) @@ -897,7 +898,7 @@ static void uf_space_endspace(MI_COLUMNDEF *rec, MI_BIT_BUFF *bit_buff, uchar *t } if (to+spaces != end) decode_bytes(rec,bit_buff,to,end-spaces); - bfill((byte*) end-spaces,spaces,' '); + bfill((uchar*) end-spaces,spaces,' '); } } @@ -912,7 +913,7 @@ static void uf_endspace(MI_COLUMNDEF *rec, MI_BIT_BUFF *bit_buff, uchar *to, } if (to+spaces != end) decode_bytes(rec,bit_buff,to,end-spaces); - bfill((byte*) end-spaces,spaces,' '); + bfill((uchar*) end-spaces,spaces,' '); } static void uf_space_prespace_selected(MI_COLUMNDEF *rec, MI_BIT_BUFF *bit_buff, @@ -920,7 +921,7 @@ static void uf_space_prespace_selected(MI_COLUMNDEF *rec, MI_BIT_BUFF *bit_buff, { uint spaces; if (get_bit(bit_buff)) - bfill((byte*) to,(end-to),' '); + bfill((uchar*) to,(end-to),' '); else { if (get_bit(bit_buff)) @@ -930,7 +931,7 @@ static void uf_space_prespace_selected(MI_COLUMNDEF *rec, MI_BIT_BUFF *bit_buff, bit_buff->error=1; return; } - bfill((byte*) to,spaces,' '); + bfill((uchar*) to,spaces,' '); if (to+spaces != end) decode_bytes(rec,bit_buff,to+spaces,end); } @@ -951,7 +952,7 @@ static void uf_prespace_selected(MI_COLUMNDEF *rec, MI_BIT_BUFF *bit_buff, bit_buff->error=1; return; } - bfill((byte*) to,spaces,' '); + bfill((uchar*) to,spaces,' '); if (to+spaces != end) decode_bytes(rec,bit_buff,to+spaces,end); } @@ -965,7 +966,7 @@ static void uf_space_prespace(MI_COLUMNDEF *rec, MI_BIT_BUFF *bit_buff, uchar *t { uint spaces; if (get_bit(bit_buff)) - bfill((byte*) to,(end-to),' '); + bfill((uchar*) to,(end-to),' '); else { if ((spaces=get_bits(bit_buff,rec->space_length_bits))+to > end) @@ -973,7 +974,7 @@ static void uf_space_prespace(MI_COLUMNDEF *rec, MI_BIT_BUFF *bit_buff, uchar *t bit_buff->error=1; return; } - bfill((byte*) to,spaces,' '); + bfill((uchar*) to,spaces,' '); if (to+spaces != end) decode_bytes(rec,bit_buff,to+spaces,end); } @@ -988,7 +989,7 @@ static void uf_prespace(MI_COLUMNDEF *rec, MI_BIT_BUFF *bit_buff, uchar *to, bit_buff->error=1; return; } - bfill((byte*) to,spaces,' '); + bfill((uchar*) to,spaces,' '); if (to+spaces != end) decode_bytes(rec,bit_buff,to+spaces,end); } @@ -1031,7 +1032,7 @@ static void uf_blob(MI_COLUMNDEF *rec, MI_BIT_BUFF *bit_buff, uchar *to, uchar *end) { if (get_bit(bit_buff)) - bzero((byte*) to,(end-to)); + bzero((uchar*) to,(end-to)); else { ulong length=get_bits(bit_buff,rec->space_length_bits); @@ -1039,11 +1040,11 @@ static void uf_blob(MI_COLUMNDEF *rec, MI_BIT_BUFF *bit_buff, if (bit_buff->blob_pos+length > bit_buff->blob_end) { bit_buff->error=1; - bzero((byte*) to,(end-to)); + bzero((uchar*) to,(end-to)); return; } decode_bytes(rec,bit_buff,bit_buff->blob_pos,bit_buff->blob_pos+length); - _my_store_blob_length((byte*) to,pack_length,length); + _my_store_blob_length((uchar*) to,pack_length,length); memcpy_fixed((char*) to+pack_length,(char*) &bit_buff->blob_pos, sizeof(char*)); bit_buff->blob_pos+=length; @@ -1286,7 +1287,7 @@ static uint decode_pos(MI_BIT_BUFF *bit_buff, MI_DECODE_TREE *decode_tree) } -int _mi_read_rnd_pack_record(MI_INFO *info, byte *buf, +int 
_mi_read_rnd_pack_record(MI_INFO *info, uchar *buf, register my_off_t filepos, my_bool skip_deleted_blocks) { @@ -1303,7 +1304,7 @@ int _mi_read_rnd_pack_record(MI_INFO *info, byte *buf, if (info->opt_flag & READ_CACHE_USED) { - if (_mi_read_cache(&info->rec_cache, (byte*) block_info.header, + if (_mi_read_cache(&info->rec_cache, (uchar*) block_info.header, filepos, share->pack.ref_length, skip_deleted_blocks ? READING_NEXT : 0)) goto err; @@ -1325,14 +1326,14 @@ int _mi_read_rnd_pack_record(MI_INFO *info, byte *buf, if (info->opt_flag & READ_CACHE_USED) { - if (_mi_read_cache(&info->rec_cache, (byte*) info->rec_buff, + if (_mi_read_cache(&info->rec_cache, (uchar*) info->rec_buff, block_info.filepos, block_info.rec_len, skip_deleted_blocks ? READING_NEXT : 0)) goto err; } else { - if (my_read(info->dfile,(byte*) info->rec_buff + block_info.offset, + if (my_read(info->dfile,(uchar*) info->rec_buff + block_info.offset, block_info.rec_len-block_info.offset, MYF(MY_NABP))) goto err; @@ -1352,7 +1353,7 @@ int _mi_read_rnd_pack_record(MI_INFO *info, byte *buf, /* Read and process header from a huff-record-file */ uint _mi_pack_get_block_info(MI_INFO *myisam, MI_BIT_BUFF *bit_buff, - MI_BLOCK_INFO *info, byte **rec_buff_p, + MI_BLOCK_INFO *info, uchar **rec_buff_p, File file, my_off_t filepos) { uchar *header=info->header; @@ -1367,9 +1368,9 @@ uint _mi_pack_get_block_info(MI_INFO *myisam, MI_BIT_BUFF *bit_buff, position is ok */ VOID(my_seek(file,filepos,MY_SEEK_SET,MYF(0))); - if (my_read(file,(char*) header,ref_length,MYF(MY_NABP))) + if (my_read(file, header,ref_length,MYF(MY_NABP))) return BLOCK_FATAL_ERROR; - DBUG_DUMP("header",(byte*) header,ref_length); + DBUG_DUMP("header",(uchar*) header,ref_length); } head_length= read_pack_length((uint) myisam->s->pack.version, header, &info->rec_len); @@ -1478,8 +1479,8 @@ static uint max_bit(register uint value) #ifdef HAVE_MMAP -static int _mi_read_mempack_record(MI_INFO *info,my_off_t filepos,byte *buf); -static int _mi_read_rnd_mempack_record(MI_INFO*, byte *,my_off_t, my_bool); +static int _mi_read_mempack_record(MI_INFO *info,my_off_t filepos,uchar *buf); +static int _mi_read_rnd_mempack_record(MI_INFO*, uchar *,my_off_t, my_bool); my_bool _mi_memmap_file(MI_INFO *info) { @@ -1506,13 +1507,13 @@ my_bool _mi_memmap_file(MI_INFO *info) void _mi_unmap_file(MI_INFO *info) { - VOID(my_munmap(info->s->file_map, + VOID(my_munmap((char*) info->s->file_map, (size_t) info->s->mmaped_length + MEMMAP_EXTRA_MARGIN)); } static uchar *_mi_mempack_get_block_info(MI_INFO *myisam, MI_BIT_BUFF *bit_buff, - MI_BLOCK_INFO *info, byte **rec_buff_p, + MI_BLOCK_INFO *info, uchar **rec_buff_p, uchar *header) { header+= read_pack_length((uint) myisam->s->pack.version, header, @@ -1532,17 +1533,17 @@ static uchar *_mi_mempack_get_block_info(MI_INFO *myisam, MI_BIT_BUFF *bit_buff, } -static int _mi_read_mempack_record(MI_INFO *info, my_off_t filepos, byte *buf) +static int _mi_read_mempack_record(MI_INFO *info, my_off_t filepos, uchar *buf) { MI_BLOCK_INFO block_info; MYISAM_SHARE *share=info->s; - byte *pos; + uchar *pos; DBUG_ENTER("mi_read_mempack_record"); if (filepos == HA_OFFSET_ERROR) DBUG_RETURN(-1); /* _search() didn't find record */ - if (!(pos= (byte*) _mi_mempack_get_block_info(info, &info->bit_buff, + if (!(pos= (uchar*) _mi_mempack_get_block_info(info, &info->bit_buff, &block_info, &info->rec_buff, (uchar*) share->file_map+ filepos))) @@ -1553,14 +1554,14 @@ static int _mi_read_mempack_record(MI_INFO *info, my_off_t filepos, byte *buf) /*ARGSUSED*/ -static 
int _mi_read_rnd_mempack_record(MI_INFO *info, byte *buf, +static int _mi_read_rnd_mempack_record(MI_INFO *info, uchar *buf, register my_off_t filepos, my_bool skip_deleted_blocks __attribute__((unused))) { MI_BLOCK_INFO block_info; MYISAM_SHARE *share=info->s; - byte *pos,*start; + uchar *pos,*start; DBUG_ENTER("_mi_read_rnd_mempack_record"); if (filepos >= share->state.state.data_file_length) @@ -1568,7 +1569,7 @@ static int _mi_read_rnd_mempack_record(MI_INFO *info, byte *buf, my_errno=HA_ERR_END_OF_FILE; goto err; } - if (!(pos= (byte*) _mi_mempack_get_block_info(info, &info->bit_buff, + if (!(pos= (uchar*) _mi_mempack_get_block_info(info, &info->bit_buff, &block_info, &info->rec_buff, (uchar*) (start=share->file_map+ @@ -1596,7 +1597,7 @@ static int _mi_read_rnd_mempack_record(MI_INFO *info, byte *buf, /* Save length of row */ -uint save_pack_length(uint version, byte *block_buff, ulong length) +uint save_pack_length(uint version, uchar *block_buff, ulong length) { if (length < 254) { diff --git a/storage/myisam/mi_page.c b/storage/myisam/mi_page.c index da9e19275c9..23a2526f756 100644 --- a/storage/myisam/mi_page.c +++ b/storage/myisam/mi_page.c @@ -29,7 +29,7 @@ uchar *_mi_fetch_keypage(register MI_INFO *info, MI_KEYDEF *keyinfo, DBUG_PRINT("enter",("page: %ld", (long) page)); tmp=(uchar*) key_cache_read(info->s->key_cache, - info->s->kfile, page, level, (byte*) buff, + info->s->kfile, page, level, (uchar*) buff, (uint) keyinfo->block_length, (uint) keyinfo->block_length, return_buffer); @@ -80,7 +80,7 @@ int _mi_write_keypage(register MI_INFO *info, register MI_KEYDEF *keyinfo, DBUG_RETURN((-1)); } DBUG_PRINT("page",("write page at: %lu",(long) page)); - DBUG_DUMP("buff",(byte*) buff,mi_getint(buff)); + DBUG_DUMP("buff",(uchar*) buff,mi_getint(buff)); #endif if ((length=keyinfo->block_length) > IO_SIZE*2 && @@ -89,12 +89,12 @@ int _mi_write_keypage(register MI_INFO *info, register MI_KEYDEF *keyinfo, #ifdef HAVE_purify { length=mi_getint(buff); - bzero((byte*) buff+length,keyinfo->block_length-length); + bzero((uchar*) buff+length,keyinfo->block_length-length); length=keyinfo->block_length; } #endif DBUG_RETURN((key_cache_write(info->s->key_cache, - info->s->kfile,page, level, (byte*) buff,length, + info->s->kfile,page, level, (uchar*) buff,length, (uint) keyinfo->block_length, (int) ((info->lock_type != F_UNLCK) || info->s->delay_key_write)))); @@ -107,7 +107,7 @@ int _mi_dispose(register MI_INFO *info, MI_KEYDEF *keyinfo, my_off_t pos, int level) { my_off_t old_link; - char buff[8]; + uchar buff[8]; DBUG_ENTER("_mi_dispose"); DBUG_PRINT("enter",("pos: %ld", (long) pos)); @@ -128,7 +128,7 @@ int _mi_dispose(register MI_INFO *info, MI_KEYDEF *keyinfo, my_off_t pos, my_off_t _mi_new(register MI_INFO *info, MI_KEYDEF *keyinfo, int level) { my_off_t pos; - char buff[8]; + uchar buff[8]; DBUG_ENTER("_mi_new"); if ((pos= info->s->state.key_del[keyinfo->block_size_index]) == diff --git a/storage/myisam/mi_preload.c b/storage/myisam/mi_preload.c index 78729f18424..60ab55106cb 100644 --- a/storage/myisam/mi_preload.c +++ b/storage/myisam/mi_preload.c @@ -55,12 +55,17 @@ int mi_preload(MI_INFO *info, ulonglong key_map, my_bool ignore_leaves) block_length= keyinfo[0].block_length; - /* Check whether all indexes use the same block size */ - for (i= 1 ; i < keys ; i++) + if (ignore_leaves) { - if (keyinfo[i].block_length != block_length) - DBUG_RETURN(my_errno= HA_ERR_NON_UNIQUE_BLOCK_SIZE); + /* Check whether all indexes use the same block size */ + for (i= 1 ; i < keys ; i++) + { + if 
(keyinfo[i].block_length != block_length) + DBUG_RETURN(my_errno= HA_ERR_NON_UNIQUE_BLOCK_SIZE); + } } + else + block_length= share->key_cache->key_cache_block_size; length= info->preload_buff_size/block_length * block_length; set_if_bigger(length, block_length); @@ -76,7 +81,7 @@ int mi_preload(MI_INFO *info, ulonglong key_map, my_bool ignore_leaves) /* Read the next block of index file into the preload buffer */ if ((my_off_t) length > (key_file_length-pos)) length= (ulong) (key_file_length-pos); - if (my_pread(share->kfile, (byte*) buff, length, pos, MYF(MY_FAE|MY_FNABP))) + if (my_pread(share->kfile, (uchar*) buff, length, pos, MYF(MY_FAE|MY_FNABP))) goto err; if (ignore_leaves) @@ -88,7 +93,7 @@ int mi_preload(MI_INFO *info, ulonglong key_map, my_bool ignore_leaves) { if (key_cache_insert(share->key_cache, share->kfile, pos, DFLT_INIT_HITS, - (byte*) buff, block_length)) + (uchar*) buff, block_length)) goto err; } pos+= block_length; @@ -100,7 +105,7 @@ int mi_preload(MI_INFO *info, ulonglong key_map, my_bool ignore_leaves) { if (key_cache_insert(share->key_cache, share->kfile, pos, DFLT_INIT_HITS, - (byte*) buff, length)) + (uchar*) buff, length)) goto err; pos+= length; } diff --git a/storage/myisam/mi_range.c b/storage/myisam/mi_range.c index 6655f5a7de6..932a4abd1b3 100644 --- a/storage/myisam/mi_range.c +++ b/storage/myisam/mi_range.c @@ -21,13 +21,10 @@ #include "myisamdef.h" #include "rt_index.h" -static ha_rows _mi_record_pos(MI_INFO *info,const byte *key,uint key_len, - enum ha_rkey_function search_flag); -static double _mi_search_pos(MI_INFO *info,MI_KEYDEF *keyinfo,uchar *key, - uint key_len,uint nextflag,my_off_t pos); -static uint _mi_keynr(MI_INFO *info,MI_KEYDEF *keyinfo,uchar *page, - uchar *keypos,uint *ret_max_key); - +static ha_rows _mi_record_pos(MI_INFO *, const uchar *, key_part_map, + enum ha_rkey_function); +static double _mi_search_pos(MI_INFO *,MI_KEYDEF *,uchar *, uint,uint,my_off_t); +static uint _mi_keynr(MI_INFO *info,MI_KEYDEF *,uchar *, uchar *,uint *); /* Estimate how many records there is in a given range @@ -47,9 +44,8 @@ static uint _mi_keynr(MI_INFO *info,MI_KEYDEF *keyinfo,uchar *page, number Estimated number of rows */ - -ha_rows mi_records_in_range(MI_INFO *info, int inx, key_range *min_key, - key_range *max_key) +ha_rows mi_records_in_range(MI_INFO *info, int inx, + key_range *min_key, key_range *max_key) { ha_rows start_pos,end_pos,res; DBUG_ENTER("mi_records_in_range"); @@ -87,7 +83,7 @@ ha_rows mi_records_in_range(MI_INFO *info, int inx, key_range *min_key, } key_buff= info->lastkey+info->s->base.max_key_length; start_key_len= _mi_pack_key(info,inx, key_buff, - (uchar*) min_key->key, min_key->length, + (uchar*) min_key->key, min_key->keypart_map, (HA_KEYSEG**) 0); res= rtree_estimate(info, inx, key_buff, start_key_len, myisam_read_vec[min_key->flag]); @@ -97,24 +93,22 @@ ha_rows mi_records_in_range(MI_INFO *info, int inx, key_range *min_key, #endif case HA_KEY_ALG_BTREE: default: - start_pos= (min_key ? - _mi_record_pos(info, min_key->key, min_key->length, - min_key->flag) : - (ha_rows) 0); - end_pos= (max_key ? - _mi_record_pos(info, max_key->key, max_key->length, - max_key->flag) : - info->state->records+ (ha_rows) 1); + start_pos= (min_key ? _mi_record_pos(info, min_key->key, + min_key->keypart_map, min_key->flag) + : (ha_rows) 0); + end_pos= (max_key ? _mi_record_pos(info, max_key->key, + max_key->keypart_map, max_key->flag) + : info->state->records + (ha_rows) 1); res= (end_pos < start_pos ? (ha_rows) 0 : (end_pos == start_pos ? 
(ha_rows) 1 : end_pos-start_pos)); if (start_pos == HA_POS_ERROR || end_pos == HA_POS_ERROR) res=HA_POS_ERROR; } - + if (info->s->concurrent_insert) rw_unlock(&info->s->key_root_lock[inx]); fast_mi_writeinfo(info); - + DBUG_PRINT("info",("records: %ld",(ulong) (res))); DBUG_RETURN(res); } @@ -122,21 +116,21 @@ ha_rows mi_records_in_range(MI_INFO *info, int inx, key_range *min_key, /* Find relative position (in records) for key in index-tree */ -static ha_rows _mi_record_pos(MI_INFO *info, const byte *key, uint key_len, +static ha_rows _mi_record_pos(MI_INFO *info, const uchar *key, + key_part_map keypart_map, enum ha_rkey_function search_flag) { - uint inx=(uint) info->lastinx, nextflag; + uint inx=(uint) info->lastinx, nextflag, key_len; MI_KEYDEF *keyinfo=info->s->keyinfo+inx; uchar *key_buff; double pos; DBUG_ENTER("_mi_record_pos"); DBUG_PRINT("enter",("search_flag: %d",search_flag)); + DBUG_ASSERT(keypart_map); - if (key_len == 0) - key_len=USE_WHOLE_KEY; key_buff=info->lastkey+info->s->base.max_key_length; - key_len=_mi_pack_key(info,inx,key_buff,(uchar*) key,key_len, + key_len=_mi_pack_key(info,inx,key_buff,(uchar*) key, keypart_map, (HA_KEYSEG**) 0); DBUG_EXECUTE("key",_mi_print_key(DBUG_FILE,keyinfo->seg, (uchar*) key_buff,key_len);); @@ -144,8 +138,42 @@ static ha_rows _mi_record_pos(MI_INFO *info, const byte *key, uint key_len, if (!(nextflag & (SEARCH_FIND | SEARCH_NO_FIND | SEARCH_LAST))) key_len=USE_WHOLE_KEY; + /* + my_handler.c:mi_compare_text() has a flag 'skip_end_space'. + This is set in my_handler.c:ha_key_cmp() depending on the + compare flags 'nextflag' and the column type. + + TEXT columns are of type HA_KEYTYPE_VARTEXT. In this case the + condition is skip_end_space= ((nextflag & (SEARCH_FIND | + SEARCH_UPDATE)) == SEARCH_FIND). + + SEARCH_FIND is used for an exact key search. The combination + SEARCH_FIND | SEARCH_UPDATE is used in write/update/delete + operations with a comment like "Not real duplicates", whatever this + means. From the condition above we can see that 'skip_end_space' is + always false for these operations. The result is that trailing space + counts in key comparison and hence, empty strings ('', string length + zero, but not NULL) compare less than strings starting with control + characters and these in turn compare less than strings starting with + blanks. + + When estimating the number of records in a key range, we request an + exact search for the minimum key. This translates into a plain + SEARCH_FIND flag. Using this alone would lead to a 'skip_end_space' + compare. Empty strings would be expected above control characters. + Their keys would not be found because they are located below control + characters. + + This is the reason that we add the SEARCH_UPDATE flag here. It makes + the key estimation compare in the same way as key write operations + do. Only this way will we find the keys where they have been inserted. + + Adding the flag unconditionally does not hurt as it is used in the + above-mentioned condition only. So it can safely be used together + with other flags.
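+ + For illustration (a hypothetical case, not from this patch): assume a VARTEXT key under which '' and a string starting with '\t' were both inserted. At insert time trailing spaces counted, so the key for '' was placed below the one for '\t'. A plain SEARCH_FIND estimate would compare space-extended, expect '' above '\t', and miss the stored key; OR-ing in SEARCH_UPDATE reproduces the insert-time ordering, so the range bound lands on the key where it actually is.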
+ */ pos=_mi_search_pos(info,keyinfo,key_buff,key_len, - nextflag | SEARCH_SAVE_BUFF, + nextflag | SEARCH_SAVE_BUFF | SEARCH_UPDATE, info->s->state.key_root[inx]); if (pos >= 0.0) { diff --git a/storage/myisam/mi_rfirst.c b/storage/myisam/mi_rfirst.c index d23bda46b1a..5a8b27b3e85 100644 --- a/storage/myisam/mi_rfirst.c +++ b/storage/myisam/mi_rfirst.c @@ -17,7 +17,7 @@ /* Read first row through a specific key */ -int mi_rfirst(MI_INFO *info, byte *buf, int inx) +int mi_rfirst(MI_INFO *info, uchar *buf, int inx) { DBUG_ENTER("mi_rfirst"); info->lastpos= HA_OFFSET_ERROR; diff --git a/storage/myisam/mi_rkey.c b/storage/myisam/mi_rkey.c index 6323c95ffd7..f1d35810d36 100644 --- a/storage/myisam/mi_rkey.c +++ b/storage/myisam/mi_rkey.c @@ -21,8 +21,8 @@ /* Read a record using key */ /* Ordinary search_flag is 0 ; Give error if no record with key */ -int mi_rkey(MI_INFO *info, byte *buf, int inx, const byte *key, uint key_len, - enum ha_rkey_function search_flag) +int mi_rkey(MI_INFO *info, uchar *buf, int inx, const uchar *key, + key_part_map keypart_map, enum ha_rkey_function search_flag) { uchar *key_buff; MYISAM_SHARE *share=info->s; @@ -30,7 +30,7 @@ int mi_rkey(MI_INFO *info, byte *buf, int inx, const byte *key, uint key_len, HA_KEYSEG *last_used_keyseg; uint pack_key_length, use_key_length, nextflag; DBUG_ENTER("mi_rkey"); - DBUG_PRINT("enter", ("base: %lx buf: %lx inx: %d search_flag: %d", + DBUG_PRINT("enter", ("base: 0x%lx buf: 0x%lx inx: %d search_flag: %d", (long) info, (long) buf, inx, search_flag)); if ((inx = _mi_check_index(info,inx)) < 0) @@ -45,22 +45,24 @@ int mi_rkey(MI_INFO *info, byte *buf, int inx, const byte *key, uint key_len, info->once_flags&= ~USE_PACKED_KEYS; /* Reset flag */ /* key is already packed!; This happens when we are using a MERGE TABLE + In this case 'key_part_map' is the length of the key! */ key_buff=info->lastkey+info->s->base.max_key_length; - pack_key_length= key_len; - bmove(key_buff,key,key_len); - last_used_keyseg= 0; + pack_key_length= keypart_map; + bmove(key_buff, key, pack_key_length); + last_used_keyseg= info->s->keyinfo[inx].seg + info->last_used_keyseg; } else { - if (key_len == 0) - key_len=USE_WHOLE_KEY; + DBUG_ASSERT(keypart_map); /* Save the packed key for later use in the second buffer of lastkey. */ key_buff=info->lastkey+info->s->base.max_key_length; pack_key_length=_mi_pack_key(info,(uint) inx, key_buff, (uchar*) key, - key_len, &last_used_keyseg); + keypart_map, &last_used_keyseg); /* Save packed_key_length for use by the MERGE engine. */ info->pack_key_length= pack_key_length; + info->last_used_keyseg= (uint16) (last_used_keyseg - + info->s->keyinfo[inx].seg); DBUG_EXECUTE("key",_mi_print_key(DBUG_FILE, keyinfo->seg, key_buff, pack_key_length);); } @@ -93,42 +95,63 @@ int mi_rkey(MI_INFO *info, byte *buf, int inx, const byte *key, uint key_len, myisam_read_vec[search_flag], info->s->state.key_root[inx])) { /* - If we searching for a partial key (or using >, >=, < or <=) and - the data is outside of the data file, we need to continue searching - for the first key inside the data file + Found a key, but it might not be usable. We cannot use rows that + are inserted by other threads after we got our table lock + ("concurrent inserts"). The record may not even be present yet. + Keys are inserted into the index(es) before the record is + inserted into the data file. When we got our table lock, we + saved the current data_file_length. Concurrent inserts always go + to the end of the file.
So we can test if the found key + references a new record. */ - if (info->lastpos >= info->state->data_file_length && - (search_flag != HA_READ_KEY_EXACT || - last_used_keyseg != keyinfo->seg + keyinfo->keysegs)) + if (info->lastpos >= info->state->data_file_length) { - do + /* The key references a concurrently inserted record. */ + if (search_flag == HA_READ_KEY_EXACT && + last_used_keyseg == keyinfo->seg + keyinfo->keysegs) + { + /* Simply ignore the key if it matches exactly. (Bug #29838) */ + my_errno= HA_ERR_KEY_NOT_FOUND; + info->lastpos= HA_OFFSET_ERROR; + } + else { - uint not_used[2]; - /* - Skip rows that are inserted by other threads since we got a lock - Note that this can only happen if we are not searching after an - full length exact key, because the keys are sorted - according to position - */ - if (_mi_search_next(info, keyinfo, info->lastkey, - info->lastkey_length, - myisam_readnext_vec[search_flag], - info->s->state.key_root[inx])) - break; /* - Check that the found key does still match the search. - _mi_search_next() delivers the next key regardless of its - value. + If searching for a partial key (or using >, >=, < or <=) and + the data is outside of the data file, we need to continue + searching for the first key inside the data file. */ - if (search_flag == HA_READ_KEY_EXACT && - ha_key_cmp(keyinfo->seg, key_buff, info->lastkey, use_key_length, - SEARCH_FIND, not_used)) + do { - my_errno= HA_ERR_KEY_NOT_FOUND; - info->lastpos= HA_OFFSET_ERROR; - break; - } - } while (info->lastpos >= info->state->data_file_length); + uint not_used[2]; + /* + Skip rows that are inserted by other threads since we got + a lock. Note that this can only happen if we are not + searching after a full length exact key, because the keys + are sorted according to position. + */ + if (_mi_search_next(info, keyinfo, info->lastkey, + info->lastkey_length, + myisam_readnext_vec[search_flag], + info->s->state.key_root[inx])) + break; /* purecov: inspected */ + /* + Check that the found key does still match the search. + _mi_search_next() delivers the next key regardless of its + value. + */ + if (search_flag == HA_READ_KEY_EXACT && + ha_key_cmp(keyinfo->seg, key_buff, info->lastkey, + use_key_length, SEARCH_FIND, not_used)) + { + /* purecov: begin inspected */ + my_errno= HA_ERR_KEY_NOT_FOUND; + info->lastpos= HA_OFFSET_ERROR; + break; + /* purecov: end */ + } + } while (info->lastpos >= info->state->data_file_length); + } } } } diff --git a/storage/myisam/mi_rlast.c b/storage/myisam/mi_rlast.c index 7805755ab70..07be619617f 100644 --- a/storage/myisam/mi_rlast.c +++ b/storage/myisam/mi_rlast.c @@ -17,7 +17,7 @@ /* Read last row with the same key as the previous read. */ -int mi_rlast(MI_INFO *info, byte *buf, int inx) +int mi_rlast(MI_INFO *info, uchar *buf, int inx) { DBUG_ENTER("mi_rlast"); info->lastpos= HA_OFFSET_ERROR; diff --git a/storage/myisam/mi_rnext.c b/storage/myisam/mi_rnext.c index f6a0a47413e..7ce66d41e0f 100644 --- a/storage/myisam/mi_rnext.c +++ b/storage/myisam/mi_rnext.c @@ -24,7 +24,7 @@ based on the position of the last used key! */ -int mi_rnext(MI_INFO *info, byte *buf, int inx) +int mi_rnext(MI_INFO *info, uchar *buf, int inx) { int error,changed; uint flag; diff --git a/storage/myisam/mi_rnext_same.c b/storage/myisam/mi_rnext_same.c index 3a7004bf47c..1892fe3e1e0 100644 --- a/storage/myisam/mi_rnext_same.c +++ b/storage/myisam/mi_rnext_same.c @@ -24,7 +24,7 @@ based on the position of the last used key! 
*/ -int mi_rnext_same(MI_INFO *info, byte *buf) +int mi_rnext_same(MI_INFO *info, uchar *buf) { int error; uint inx,not_used[2]; diff --git a/storage/myisam/mi_rprev.c b/storage/myisam/mi_rprev.c index 09802627185..d1407012590 100644 --- a/storage/myisam/mi_rprev.c +++ b/storage/myisam/mi_rprev.c @@ -22,7 +22,7 @@ based on the position of the last used key! */ -int mi_rprev(MI_INFO *info, byte *buf, int inx) +int mi_rprev(MI_INFO *info, uchar *buf, int inx) { int error,changed; register uint flag; diff --git a/storage/myisam/mi_rrnd.c b/storage/myisam/mi_rrnd.c index d31e6c24a37..211e5fa51cc 100644 --- a/storage/myisam/mi_rrnd.c +++ b/storage/myisam/mi_rrnd.c @@ -29,7 +29,7 @@ HA_ERR_END_OF_FILE = EOF. */ -int mi_rrnd(MI_INFO *info, byte *buf, register my_off_t filepos) +int mi_rrnd(MI_INFO *info, uchar *buf, register my_off_t filepos) { my_bool skip_deleted_blocks; DBUG_ENTER("mi_rrnd"); diff --git a/storage/myisam/mi_rsame.c b/storage/myisam/mi_rsame.c index 4831ebb3d7c..8093498483f 100644 --- a/storage/myisam/mi_rsame.c +++ b/storage/myisam/mi_rsame.c @@ -25,7 +25,7 @@ */ -int mi_rsame(MI_INFO *info, byte *record, int inx) +int mi_rsame(MI_INFO *info, uchar *record, int inx) { DBUG_ENTER("mi_rsame"); diff --git a/storage/myisam/mi_rsamepos.c b/storage/myisam/mi_rsamepos.c index 717b9ab52d5..6a1e462b686 100644 --- a/storage/myisam/mi_rsamepos.c +++ b/storage/myisam/mi_rsamepos.c @@ -27,7 +27,7 @@ ** HA_ERR_END_OF_FILE = End of file */ -int mi_rsame_with_pos(MI_INFO *info, byte *record, int inx, my_off_t filepos) +int mi_rsame_with_pos(MI_INFO *info, uchar *record, int inx, my_off_t filepos) { DBUG_ENTER("mi_rsame_with_pos"); DBUG_PRINT("enter",("index: %d filepos: %ld", inx, (long) filepos)); diff --git a/storage/myisam/mi_scan.c b/storage/myisam/mi_scan.c index 87debb67b37..a225b399660 100644 --- a/storage/myisam/mi_scan.c +++ b/storage/myisam/mi_scan.c @@ -36,7 +36,7 @@ int mi_scan_init(register MI_INFO *info) HA_ERR_END_OF_FILE = EOF. 
*/ -int mi_scan(MI_INFO *info, byte *buf) +int mi_scan(MI_INFO *info, uchar *buf) { DBUG_ENTER("mi_scan"); /* Init all but update-flag */ diff --git a/storage/myisam/mi_search.c b/storage/myisam/mi_search.c index fb3bdd4409e..2195ac178dd 100644 --- a/storage/myisam/mi_search.c +++ b/storage/myisam/mi_search.c @@ -78,7 +78,7 @@ int _mi_search(register MI_INFO *info, register MI_KEYDEF *keyinfo, if (!(buff=_mi_fetch_keypage(info,keyinfo,pos,DFLT_INIT_HITS,info->buff, test(!(nextflag & SEARCH_SAVE_BUFF))))) goto err; - DBUG_DUMP("page",(byte*) buff,mi_getint(buff)); + DBUG_DUMP("page",(uchar*) buff,mi_getint(buff)); flag=(*keyinfo->bin_search)(info,keyinfo,buff,key,key_len,nextflag, &keypos,lastkey, &last_key); @@ -472,9 +472,9 @@ int _mi_prefix_search(MI_INFO *info, register MI_KEYDEF *keyinfo, uchar *page, else { /* We have to compare k and vseg as if they were space extended */ - uchar *end= k+ (cmplen - len); - for ( ; k < end && *k == ' '; k++) ; - if (k == end) + uchar *k_end= k+ (cmplen - len); + for ( ; k < k_end && *k == ' '; k++) ; + if (k == k_end) goto cmp_rest; /* should never happen */ if (*k < (uchar) ' ') { @@ -486,15 +486,15 @@ int _mi_prefix_search(MI_INFO *info, register MI_KEYDEF *keyinfo, uchar *page, } else if (len > cmplen) { - uchar *end; + uchar *vseg_end; if ((nextflag & SEARCH_PREFIX) && key_len_left == 0) goto fix_flag; /* We have to compare k and vseg as if they were space extended */ - for (end=vseg + (len-cmplen) ; - vseg < end && *vseg == (uchar) ' '; + for (vseg_end= vseg + (len-cmplen) ; + vseg < vseg_end && *vseg == (uchar) ' '; vseg++, matched++) ; - DBUG_ASSERT(vseg < end); + DBUG_ASSERT(vseg < vseg_end); if (*vseg > (uchar) ' ') { @@ -753,7 +753,7 @@ void _mi_dpointer(MI_INFO *info, uchar *buff, my_off_t pos) uint _mi_get_static_key(register MI_KEYDEF *keyinfo, uint nod_flag, register uchar **page, register uchar *key) { - memcpy((byte*) key,(byte*) *page, + memcpy((uchar*) key,(uchar*) *page, (size_t) (keyinfo->keylength+nod_flag)); *page+=keyinfo->keylength+nod_flag; return(keyinfo->keylength); @@ -836,7 +836,7 @@ uint _mi_get_pack_key(register MI_KEYDEF *keyinfo, uint nod_flag, if (tot_length >= 255 && *start != 255) { /* length prefix changed from a length of one to a length of 3 */ - bmove_upp((char*) key+length+3,(char*) key+length+1,length); + bmove_upp(key+length+3, key+length+1, length); *key=255; mi_int2store(key+1,tot_length); key+=3+length; @@ -897,12 +897,12 @@ uint _mi_get_pack_key(register MI_KEYDEF *keyinfo, uint nod_flag, else length=keyseg->length; } - memcpy((byte*) key,(byte*) page,(size_t) length); + memcpy((uchar*) key,(uchar*) page,(size_t) length); key+=length; page+=length; } length=keyseg->length+nod_flag; - bmove((byte*) key,(byte*) page,length); + bmove((uchar*) key,(uchar*) page,length); *page_pos= page+length; return ((uint) (key-start_key)+keyseg->length); } /* _mi_get_pack_key */ @@ -926,11 +926,16 @@ uint _mi_get_binary_pack_key(register MI_KEYDEF *keyinfo, uint nod_flag, /* Keys are compressed the following way: - prefix length Packed length of prefix for the prev key. (1 or 3 bytes) + prefix length Packed length of prefix common with prev key (1 or 3 bytes) for each key segment: [is null] Null indicator if can be null (1 byte, zero means null) [length] Packed length if varlength (1 or 3 bytes) + key segment 'length' bytes of key segment value pointer Reference to the data file (last_keyseg->length). + + get_key_length() is a macro. It gets the prefix length from 'page' + and puts it into 'length'. 
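+ (Judging by the unpacking code below: a first byte below 255 is the length itself, taking one byte; a first byte of 255 is a marker followed by a two-byte length, taking three.)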
It increments 'page' by 1 or 3, depending + on the packed length of the prefix length. */ get_key_length(length,page); if (length) @@ -945,34 +950,44 @@ uint _mi_get_binary_pack_key(register MI_KEYDEF *keyinfo, uint nod_flag, my_errno=HA_ERR_CRASHED; DBUG_RETURN(0); /* Wrong key */ } - from=key; from_end=key+length; + /* Key is packed against prev key, take prefix from prev key. */ + from= key; + from_end= key + length; } else { - from=page; from_end=page_end; /* Not packed key */ + /* Key is not packed against prev key, take all from page buffer. */ + from= page; + from_end= page_end; } /* - The trouble is that key is split in two parts: - The first part is in from ...from_end-1. - The second part starts at page + The trouble is that a key can be split into two parts: + The first part (prefix) is in from .. from_end - 1. + The second part starts at page. + The split can be at any byte position. So we need to check for + the end of the first part before using each byte. */ for (keyseg=keyinfo->seg ; keyseg->type ;keyseg++) { if (keyseg->flag & HA_NULL_PART) { + /* If prefix is used up, switch to rest. */ if (from == from_end) { from=page; from_end=page_end; } if (!(*key++ = *from++)) continue; /* Null part */ } if (keyseg->flag & (HA_VAR_LENGTH_PART | HA_BLOB_PART | HA_SPACE_PACK)) { - /* Get length of dynamic length key part */ + /* If prefix is used up, switch to rest. */ if (from == from_end) { from=page; from_end=page_end; } + /* Get length of dynamic length key part */ if ((length= (*key++ = *from++)) == 255) { + /* If prefix is used up, switch to rest. */ if (from == from_end) { from=page; from_end=page_end; } length= (uint) ((*key++ = *from++)) << 8; + /* If prefix is used up, switch to rest. */ if (from == from_end) { from=page; from_end=page_end; } length+= (uint) ((*key++ = *from++)); } @@ -988,18 +1003,30 @@ uint _mi_get_binary_pack_key(register MI_KEYDEF *keyinfo, uint nod_flag, } DBUG_PRINT("info",("key: 0x%lx from: 0x%lx length: %u", (long) key, (long) from, length)); - memmove((byte*) key, (byte*) from, (size_t) length); + memmove((uchar*) key, (uchar*) from, (size_t) length); key+=length; from+=length; } + /* + Last segment (type == 0) contains length of data pointer. + If we have mixed key blocks with data pointer and key block pointer, + we have to copy both. + */ length=keyseg->length+nod_flag; if ((tmp=(uint) (from_end-from)) <= length) { + /* Remaining length is less than or equal to the max possible length. */ memcpy(key+tmp,page,length-tmp); /* Get last part of key */ *page_pos= page+length-tmp; } else { + /* + Remaining length is greater than the max possible length. + This can happen only if we switched to the new key bytes already. + 'page_end' is calculated with MI_MAX_KEY_BUFF. So it can be far + beyond the real end of the key. + */ if (from_end != page_end) { DBUG_PRINT("error",("Error when unpacking key")); @@ -1007,7 +1034,8 @@ uint _mi_get_binary_pack_key(register MI_KEYDEF *keyinfo, uint nod_flag, my_errno=HA_ERR_CRASHED; DBUG_RETURN(0); /* Error */ } - memcpy((byte*) key,(byte*) from,(size_t) length); + /* Copy data pointer and, if appropriate, key block pointer. */ + memcpy((uchar*) key,(uchar*) from,(size_t) length); *page_pos= from+length; } DBUG_RETURN((uint) (key-start_key)+keyseg->length); @@ -1026,7 +1054,7 @@ uchar *_mi_get_key(MI_INFO *info, MI_KEYDEF *keyinfo, uchar *page, nod_flag=mi_test_if_nod(page); if (!
(keyinfo->flag & (HA_VAR_LENGTH_KEY | HA_BINARY_PACK_KEY))) { - bmove((byte*) key,(byte*) keypos,keyinfo->keylength+nod_flag); + bmove((uchar*) key,(uchar*) keypos,keyinfo->keylength+nod_flag); DBUG_RETURN(keypos+keyinfo->keylength+nod_flag); } else @@ -1064,7 +1092,7 @@ static my_bool _mi_get_prev_key(MI_INFO *info, MI_KEYDEF *keyinfo, uchar *page, if (! (keyinfo->flag & (HA_VAR_LENGTH_KEY | HA_BINARY_PACK_KEY))) { *return_key_length=keyinfo->keylength; - bmove((byte*) key,(byte*) keypos- *return_key_length-nod_flag, + bmove((uchar*) key,(uchar*) keypos- *return_key_length-nod_flag, *return_key_length); DBUG_RETURN(0); } @@ -1106,7 +1134,7 @@ uchar *_mi_get_last_key(MI_INFO *info, MI_KEYDEF *keyinfo, uchar *page, lastpos=endpos-keyinfo->keylength-nod_flag; *return_key_length=keyinfo->keylength; if (lastpos > page) - bmove((byte*) lastkey,(byte*) lastpos,keyinfo->keylength+nod_flag); + bmove((uchar*) lastkey,(uchar*) lastpos,keyinfo->keylength+nod_flag); } else { @@ -1197,7 +1225,7 @@ uint _mi_keylength_part(MI_KEYDEF *keyinfo, register uchar *key, uchar *_mi_move_key(MI_KEYDEF *keyinfo, uchar *to, uchar *from) { reg1 uint length; - memcpy((byte*) to, (byte*) from, + memcpy((uchar*) to, (uchar*) from, (size_t) (length=_mi_keylength(keyinfo,from))); return to+length; } @@ -1799,7 +1827,7 @@ void _mi_store_static_key(MI_KEYDEF *keyinfo __attribute__((unused)), register uchar *key_pos, register MI_KEY_PARAM *s_temp) { - memcpy((byte*) key_pos,(byte*) s_temp->key,(size_t) s_temp->totlength); + memcpy((uchar*) key_pos,(uchar*) s_temp->key,(size_t) s_temp->totlength); } @@ -1832,7 +1860,7 @@ void _mi_store_var_pack_key(MI_KEYDEF *keyinfo __attribute__((unused)), /* Not packed against previous key */ store_pack_length(s_temp->pack_marker == 128,key_pos,s_temp->key_length); } - bmove((byte*) key_pos,(byte*) s_temp->key, + bmove((uchar*) key_pos,(uchar*) s_temp->key, (length=s_temp->totlength-(uint) (key_pos-start))); if (!s_temp->next_key_pos) /* No following key */ diff --git a/storage/myisam/mi_static.c b/storage/myisam/mi_static.c index 21a25f66b7c..c92d577b621 100644 --- a/storage/myisam/mi_static.c +++ b/storage/myisam/mi_static.c @@ -27,7 +27,7 @@ uchar NEAR myisam_file_magic[]= { (uchar) 254, (uchar) 254,'\007', '\001', }; uchar NEAR myisam_pack_file_magic[]= { (uchar) 254, (uchar) 254,'\010', '\002', }; -my_string myisam_log_filename=(char*) "myisam.log"; +char * myisam_log_filename=(char*) "myisam.log"; File myisam_log_file= -1; uint myisam_quick_table_bits=9; ulong myisam_block_size= MI_KEY_BLOCK_LENGTH; /* Best by test */ diff --git a/storage/myisam/mi_statrec.c b/storage/myisam/mi_statrec.c index 3f92ec31d4c..e3771560c01 100644 --- a/storage/myisam/mi_statrec.c +++ b/storage/myisam/mi_statrec.c @@ -18,7 +18,7 @@ #include "myisamdef.h" -int _mi_write_static_record(MI_INFO *info, const byte *record) +int _mi_write_static_record(MI_INFO *info, const uchar *record) { uchar temp[8]; /* max pointer length */ if (info->s->state.dellink != HA_OFFSET_ERROR && @@ -26,14 +26,14 @@ int _mi_write_static_record(MI_INFO *info, const byte *record) { my_off_t filepos=info->s->state.dellink; info->rec_cache.seek_not_done=1; /* We have done a seek */ - if (info->s->file_read(info,(char*) &temp[0],info->s->base.rec_reflength, + if (info->s->file_read(info, &temp[0],info->s->base.rec_reflength, info->s->state.dellink+1, MYF(MY_NABP))) goto err; info->s->state.dellink= _mi_rec_pos(info->s,temp); info->state->del--; info->state->empty-=info->s->base.pack_reclength; - if (info->s->file_write(info, (char*) 
record, info->s->base.reclength, + if (info->s->file_write(info, record, info->s->base.reclength, filepos, MYF(MY_NABP))) goto err; @@ -48,29 +48,29 @@ int _mi_write_static_record(MI_INFO *info, const uchar *record) } if (info->opt_flag & WRITE_CACHE_USED) { /* Cache in use */ - if (my_b_write(&info->rec_cache, (byte*) record, + if (my_b_write(&info->rec_cache, record, info->s->base.reclength)) goto err; if (info->s->base.pack_reclength != info->s->base.reclength) { uint length=info->s->base.pack_reclength - info->s->base.reclength; - bzero((char*) temp,length); - if (my_b_write(&info->rec_cache, (byte*) temp,length)) + bzero(temp,length); + if (my_b_write(&info->rec_cache, temp,length)) goto err; } } else { info->rec_cache.seek_not_done=1; /* We have done a seek */ - if (info->s->file_write(info,(char*) record,info->s->base.reclength, + if (info->s->file_write(info, record, info->s->base.reclength, info->state->data_file_length, info->s->write_flag)) goto err; if (info->s->base.pack_reclength != info->s->base.reclength) { uint length=info->s->base.pack_reclength - info->s->base.reclength; - bzero((char*) temp,length); - if (info->s->file_write(info, (byte*) temp,length, + bzero(temp,length); + if (info->s->file_write(info, temp,length, info->state->data_file_length+ info->s->base.reclength, info->s->write_flag)) @@ -85,13 +85,13 @@ int _mi_write_static_record(MI_INFO *info, const uchar *record) return 1; } -int _mi_update_static_record(MI_INFO *info, my_off_t pos, const byte *record) +int _mi_update_static_record(MI_INFO *info, my_off_t pos, const uchar *record) { info->rec_cache.seek_not_done=1; /* We have done a seek */ return (info->s->file_write(info, - (char*) record,info->s->base.reclength, - pos, - MYF(MY_NABP)) != 0); + record, info->s->base.reclength, + pos, + MYF(MY_NABP)) != 0); } @@ -105,12 +105,12 @@ int _mi_delete_static_record(MI_INFO *info) _mi_dpointer(info,temp+1,info->s->state.dellink); info->s->state.dellink = info->lastpos; info->rec_cache.seek_not_done=1; - return (info->s->file_write(info,(byte*) temp, 1+info->s->rec_reflength, + return (info->s->file_write(info,(uchar*) temp, 1+info->s->rec_reflength, info->lastpos, MYF(MY_NABP)) != 0); } -int _mi_cmp_static_record(register MI_INFO *info, register const byte *old) +int _mi_cmp_static_record(register MI_INFO *info, register const uchar *old) { DBUG_ENTER("_mi_cmp_static_record"); @@ -129,11 +129,11 @@ int _mi_cmp_static_record(register MI_INFO *info, register const byte *old) if ((info->opt_flag & READ_CHECK_USED)) { /* If check isn't disabled */ info->rec_cache.seek_not_done=1; /* We have done a seek */ - if (info->s->file_read(info, (char*) info->rec_buff, info->s->base.reclength, + if (info->s->file_read(info, info->rec_buff, info->s->base.reclength, info->lastpos, MYF(MY_NABP))) DBUG_RETURN(-1); - if (memcmp((byte*) info->rec_buff, (byte*) old, + if (memcmp(info->rec_buff, old, (uint) info->s->base.reclength)) { DBUG_DUMP("read",old,info->s->base.reclength); @@ -147,12 +147,12 @@ int _mi_cmp_static_record(register MI_INFO *info, register const byte *old) int _mi_cmp_static_unique(MI_INFO *info, MI_UNIQUEDEF *def, - const byte *record, my_off_t pos) + const uchar *record, my_off_t pos) { DBUG_ENTER("_mi_cmp_static_unique"); info->rec_cache.seek_not_done=1; /* We have done a seek */ - if (info->s->file_read(info, (char*) info->rec_buff, info->s->base.reclength, + if (info->s->file_read(info, info->rec_buff, info->s->base.reclength, pos, MYF(MY_NABP))) DBUG_RETURN(-1); DBUG_RETURN(mi_unique_comp(def, record,
info->rec_buff, @@ -166,7 +166,7 @@ int _mi_cmp_static_unique(MI_INFO *info, MI_UNIQUEDEF *def, /* MY_FILE_ERROR on read-error or locking-error */ int _mi_read_static_record(register MI_INFO *info, register my_off_t pos, - register byte *record) + register uchar *record) { int error; @@ -178,7 +178,7 @@ int _mi_read_static_record(register MI_INFO *info, register my_off_t pos, return(-1); info->rec_cache.seek_not_done=1; /* We have done a seek */ - error=info->s->file_read(info,(char*) record,info->s->base.reclength, + error=info->s->file_read(info, record, info->s->base.reclength, pos,MYF(MY_NABP)) != 0; fast_mi_writeinfo(info); if (! error) @@ -199,7 +199,7 @@ int _mi_read_static_record(register MI_INFO *info, register my_off_t pos, -int _mi_read_rnd_static_record(MI_INFO *info, byte *buf, +int _mi_read_rnd_static_record(MI_INFO *info, uchar *buf, register my_off_t filepos, my_bool skip_deleted_blocks) { @@ -274,11 +274,11 @@ int _mi_read_rnd_static_record(MI_INFO *info, byte *buf, } /* Read record with caching */ - error=my_b_read(&info->rec_cache,(byte*) buf,share->base.reclength); + error=my_b_read(&info->rec_cache,(uchar*) buf,share->base.reclength); if (info->s->base.pack_reclength != info->s->base.reclength && !error) { char tmp[8]; /* Skip fill bytes */ - error=my_b_read(&info->rec_cache,(byte*) tmp, + error=my_b_read(&info->rec_cache,(uchar*) tmp, info->s->base.pack_reclength - info->s->base.reclength); } if (locked) diff --git a/storage/myisam/mi_test1.c b/storage/myisam/mi_test1.c index ebb9cdcb2f7..a68bcbed56c 100644 --- a/storage/myisam/mi_test1.c +++ b/storage/myisam/mi_test1.c @@ -40,9 +40,9 @@ static HA_KEYSEG uniqueseg[10]; static int run_test(const char *filename); static void get_options(int argc, char *argv[]); -static void create_key(char *key,uint rownr); -static void create_record(char *record,uint rownr); -static void update_record(char *record); +static void create_key(uchar *key,uint rownr); +static void create_record(uchar *record,uint rownr); +static void update_record(uchar *record); int main(int argc,char *argv[]) { @@ -62,7 +62,7 @@ static int run_test(const char *filename) int i,j,error,deleted,rec_length,uniques=0; ha_rows found,row_count; my_off_t pos; - char record[MAX_REC_LENGTH],key[MAX_REC_LENGTH],read_record[MAX_REC_LENGTH]; + uchar record[MAX_REC_LENGTH],key[MAX_REC_LENGTH],read_record[MAX_REC_LENGTH]; MI_UNIQUEDEF uniquedef; MI_CREATE_INFO create_info; @@ -109,7 +109,7 @@ static int run_test(const char *filename) } keyinfo[0].flag = (uint8) (pack_keys | unique_key); - bzero((byte*) flags,sizeof(flags)); + bzero((uchar*) flags,sizeof(flags)); if (opt_unique) { uint start; @@ -258,7 +258,8 @@ static int run_test(const char *filename) continue; create_key(key,j); my_errno=0; - if ((error = mi_rkey(file,read_record,0,key,0,HA_READ_KEY_EXACT))) + if ((error = mi_rkey(file,read_record,0,key,HA_WHOLE_KEY, + HA_READ_KEY_EXACT))) { if (verbose || (flags[j] >= 1 || (error && my_errno != HA_ERR_KEY_NOT_FOUND))) @@ -285,7 +286,7 @@ static int run_test(const char *filename) { create_key(key,i); my_errno=0; - error=mi_rkey(file,read_record,0,key,0,HA_READ_KEY_EXACT); + error=mi_rkey(file,read_record,0,key,HA_WHOLE_KEY,HA_READ_KEY_EXACT); if (verbose || (error == 0 && flags[i] == 0 && unique_key) || (error && (flags[i] != 0 || my_errno != HA_ERR_KEY_NOT_FOUND))) @@ -326,20 +327,20 @@ err: } -static void create_key_part(char *key,uint rownr) +static void create_key_part(uchar *key,uint rownr) { if (!unique_key) rownr&=7; /* Some identical keys */ if
(keyinfo[0].seg[0].type == HA_KEYTYPE_NUM) { - sprintf(key,"%*d",keyinfo[0].seg[0].length,rownr); + sprintf((char*) key,"%*d",keyinfo[0].seg[0].length,rownr); } else if (keyinfo[0].seg[0].type == HA_KEYTYPE_VARTEXT1 || keyinfo[0].seg[0].type == HA_KEYTYPE_VARTEXT2) { /* Alpha record */ /* Create a key that may be easily packed */ bfill(key,keyinfo[0].seg[0].length,rownr < 10 ? 'A' : 'B'); - sprintf(key+keyinfo[0].seg[0].length-2,"%-2d",rownr); + sprintf((char*) key+keyinfo[0].seg[0].length-2,"%-2d",rownr); if ((rownr & 7) == 0) { /* Change the key to force a unpack of the next key */ @@ -349,12 +350,12 @@ static void create_key_part(char *key,uint rownr) else { /* Alpha record */ if (keyinfo[0].seg[0].flag & HA_SPACE_PACK) - sprintf(key,"%-*d",keyinfo[0].seg[0].length,rownr); + sprintf((char*) key,"%-*d",keyinfo[0].seg[0].length,rownr); else { /* Create a key that may be easily packed */ bfill(key,keyinfo[0].seg[0].length,rownr < 10 ? 'A' : 'B'); - sprintf(key+keyinfo[0].seg[0].length-2,"%-2d",rownr); + sprintf((char*) key+keyinfo[0].seg[0].length-2,"%-2d",rownr); if ((rownr & 7) == 0) { /* Change the key to force a unpack of the next key */ @@ -365,7 +366,7 @@ static void create_key_part(char *key,uint rownr) } -static void create_key(char *key,uint rownr) +static void create_key(uchar *key,uint rownr) { if (keyinfo[0].seg[0].null_bit) { @@ -381,7 +382,7 @@ static void create_key(char *key,uint rownr) { uint tmp; create_key_part(key+2,rownr); - tmp=strlen(key+2); + tmp=strlen((char*) key+2); int2store(key,tmp); } else @@ -389,13 +390,13 @@ static void create_key(char *key,uint rownr) } -static char blob_key[MAX_REC_LENGTH]; -static char blob_record[MAX_REC_LENGTH+20*20]; +static uchar blob_key[MAX_REC_LENGTH]; +static uchar blob_record[MAX_REC_LENGTH+20*20]; -static void create_record(char *record,uint rownr) +static void create_record(uchar *record,uint rownr) { - char *pos; + uchar *pos; bzero((char*) record,MAX_REC_LENGTH); record[0]=1; /* delete marker */ if (rownr == 0 && keyinfo[0].seg[0].null_bit) @@ -405,9 +406,9 @@ static void create_record(char *record,uint rownr) if (recinfo[1].type == FIELD_BLOB) { uint tmp; - char *ptr; + uchar *ptr; create_key_part(blob_key,rownr); - tmp=strlen(blob_key); + tmp=strlen((char*) blob_key); int4store(pos,tmp); ptr=blob_key; memcpy_fixed(pos+4,&ptr,sizeof(char*)); @@ -417,7 +418,7 @@ static void create_record(char *record,uint rownr) { uint tmp, pack_length= HA_VARCHAR_PACKLENGTH(recinfo[1].length-1); create_key_part(pos+pack_length,rownr); - tmp= strlen(pos+pack_length); + tmp= strlen((char*) pos+pack_length); if (pack_length == 1) *(uchar*) pos= (uchar) tmp; else @@ -432,10 +433,10 @@ static void create_record(char *record,uint rownr) if (recinfo[2].type == FIELD_BLOB) { uint tmp; - char *ptr;; - sprintf(blob_record,"... row: %d", rownr); - strappend(blob_record,max(MAX_REC_LENGTH-rownr,10),' '); - tmp=strlen(blob_record); + uchar *ptr;; + sprintf((char*) blob_record,"... row: %d", rownr); + strappend((char*) blob_record,max(MAX_REC_LENGTH-rownr,10),' '); + tmp=strlen((char*) blob_record); int4store(pos,tmp); ptr=blob_record; memcpy_fixed(pos+4,&ptr,sizeof(char*)); @@ -443,28 +444,28 @@ static void create_record(char *record,uint rownr) else if (recinfo[2].type == FIELD_VARCHAR) { uint tmp, pack_length= HA_VARCHAR_PACKLENGTH(recinfo[1].length-1); - sprintf(pos+pack_length, "... row: %d", rownr); - tmp= strlen(pos+pack_length); + sprintf((char*) pos+pack_length, "... 
row: %d", rownr); + tmp= strlen((char*) pos+pack_length); if (pack_length == 1) - *(uchar*) pos= (uchar) tmp; + *pos= (uchar) tmp; else int2store(pos,tmp); } else { - sprintf(pos,"... row: %d", rownr); - strappend(pos,recinfo[2].length,' '); + sprintf((char*) pos,"... row: %d", rownr); + strappend((char*) pos,recinfo[2].length,' '); } } /* change row to test re-packing of rows and reallocation of keys */ -static void update_record(char *record) +static void update_record(uchar *record) { - char *pos=record+1; + uchar *pos=record+1; if (recinfo[1].type == FIELD_BLOB) { - char *column,*ptr; + uchar *column,*ptr; int length; length=uint4korr(pos); /* Long blob */ memcpy_fixed(&column,pos+4,sizeof(char*)); @@ -473,7 +474,8 @@ static void update_record(char *record) memcpy_fixed(pos+4,&ptr,sizeof(char*)); /* Store pointer to new key */ if (keyinfo[0].seg[0].type != HA_KEYTYPE_NUM) default_charset_info->cset->casedn(default_charset_info, - blob_key, length, blob_key, length); + (char*) blob_key, length, + (char*) blob_key, length); pos+=recinfo[1].length; } else if (recinfo[1].type == FIELD_VARCHAR) @@ -481,22 +483,22 @@ static void update_record(char *record) uint pack_length= HA_VARCHAR_PACKLENGTH(recinfo[1].length-1); uint length= pack_length == 1 ? (uint) *(uchar*) pos : uint2korr(pos); default_charset_info->cset->casedn(default_charset_info, - pos + pack_length, length, - pos + pack_length, length); + (char*) pos + pack_length, length, + (char*) pos + pack_length, length); pos+=recinfo[1].length; } else { if (keyinfo[0].seg[0].type != HA_KEYTYPE_NUM) default_charset_info->cset->casedn(default_charset_info, - pos, keyinfo[0].seg[0].length, - pos, keyinfo[0].seg[0].length); + (char*) pos, keyinfo[0].seg[0].length, + (char*) pos, keyinfo[0].seg[0].length); pos+=recinfo[1].length; } if (recinfo[2].type == FIELD_BLOB) { - char *column; + uchar *column; int length; length=uint4korr(pos); memcpy_fixed(&column,pos+4,sizeof(char*)); @@ -504,7 +506,7 @@ static void update_record(char *record) bfill(blob_record+length,20,'.'); /* Make it larger */ length+=20; int4store(pos,length); - column=blob_record; + column= blob_record; memcpy_fixed(pos+4,&column,sizeof(char*)); } else if (recinfo[2].type == FIELD_VARCHAR) @@ -534,21 +536,21 @@ static struct my_option my_long_options[] = {"debug", '#', "Undocumented", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, #endif - {"delete_rows", 'd', "Undocumented", (gptr*) &remove_count, - (gptr*) &remove_count, 0, GET_UINT, REQUIRED_ARG, 1000, 0, 0, 0, 0, 0}, + {"delete_rows", 'd', "Undocumented", (uchar**) &remove_count, + (uchar**) &remove_count, 0, GET_UINT, REQUIRED_ARG, 1000, 0, 0, 0, 0, 0}, {"help", '?', "Display help and exit", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"insert_rows", 'i', "Undocumented", (gptr*) &insert_count, - (gptr*) &insert_count, 0, GET_UINT, REQUIRED_ARG, 1000, 0, 0, 0, 0, 0}, + {"insert_rows", 'i', "Undocumented", (uchar**) &insert_count, + (uchar**) &insert_count, 0, GET_UINT, REQUIRED_ARG, 1000, 0, 0, 0, 0, 0}, {"key_alpha", 'a', "Use a key of type HA_KEYTYPE_TEXT", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"key_binary_pack", 'B', "Undocumented", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"key_blob", 'b', "Undocumented", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"key_cache", 'K', "Undocumented", (gptr*) &key_cacheing, - (gptr*) &key_cacheing, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"key_length", 'k', "Undocumented", (gptr*) &key_length, (gptr*) &key_length, + {"key_cache", 'K', "Undocumented", 
(uchar**) &key_cacheing, + (uchar**) &key_cacheing, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, + {"key_length", 'k', "Undocumented", (uchar**) &key_length, (uchar**) &key_length, 0, GET_UINT, REQUIRED_ARG, 6, 0, 0, 0, 0, 0}, {"key_multiple", 'm', "Undocumented", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, @@ -559,21 +561,21 @@ static struct my_option my_long_options[] = {"key_varchar", 'w', "Test VARCHAR keys", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"null_fields", 'N', "Define fields with NULL", - (gptr*) &null_fields, (gptr*) &null_fields, 0, GET_BOOL, NO_ARG, + (uchar**) &null_fields, (uchar**) &null_fields, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"row_fixed_size", 'S', "Undocumented", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"row_pointer_size", 'R', "Undocumented", (gptr*) &rec_pointer_size, - (gptr*) &rec_pointer_size, 0, GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"row_pointer_size", 'R', "Undocumented", (uchar**) &rec_pointer_size, + (uchar**) &rec_pointer_size, 0, GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"silent", 's', "Undocumented", - (gptr*) &silent, (gptr*) &silent, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"skip_update", 'U', "Undocumented", (gptr*) &skip_update, - (gptr*) &skip_update, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"unique", 'C', "Undocumented", (gptr*) &opt_unique, (gptr*) &opt_unique, 0, + (uchar**) &silent, (uchar**) &silent, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, + {"skip_update", 'U', "Undocumented", (uchar**) &skip_update, + (uchar**) &skip_update, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, + {"unique", 'C', "Undocumented", (uchar**) &opt_unique, (uchar**) &opt_unique, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"update_rows", 'u', "Undocumented", (gptr*) &update_count, - (gptr*) &update_count, 0, GET_UINT, REQUIRED_ARG, 1000, 0, 0, 0, 0, 0}, - {"verbose", 'v', "Be more verbose", (gptr*) &verbose, (gptr*) &verbose, 0, + {"update_rows", 'u', "Undocumented", (uchar**) &update_count, + (uchar**) &update_count, 0, GET_UINT, REQUIRED_ARG, 1000, 0, 0, 0, 0, 0}, + {"verbose", 'v', "Be more verbose", (uchar**) &verbose, (uchar**) &verbose, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"version", 'V', "Print version number and exit", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, diff --git a/storage/myisam/mi_test2.c b/storage/myisam/mi_test2.c index 878bba31ea8..902801b5e6e 100644 --- a/storage/myisam/mi_test2.c +++ b/storage/myisam/mi_test2.c @@ -36,8 +36,8 @@ static void get_options(int argc, char *argv[]); static uint rnd(uint max_value); -static void fix_length(byte *record,uint length); -static void put_blob_in_record(char *blob_pos,char **blob_buffer); +static void fix_length(uchar *record,uint length); +static void put_blob_in_record(uchar *blob_pos,char **blob_buffer); static void copy_key(struct st_myisam_info *info,uint inx, uchar *record,uchar *key); @@ -53,8 +53,8 @@ static uint key_cache_block_size= KEY_CACHE_BLOCK_SIZE; static uint keys=MYISAM_KEYS,recant=1000; static uint use_blob=0; static uint16 key1[1001],key3[5000]; -static char record[300],record2[300],key[100],key2[100], - read_record[300],read_record2[300],read_record3[300]; +static uchar record[300],record2[300],key[100],key2[100]; +static uchar read_record[300],read_record2[300],read_record3[300]; static HA_KEYSEG glob_keyseg[MYISAM_KEYS][MAX_PARTS]; /* Test program */ @@ -231,7 +231,7 @@ int main(int argc, char *argv[]) for (i=0 ; i < recant ; i++) { n1=rnd(1000); n2=rnd(100); n3=rnd(5000); - sprintf(record,"%6d:%4d:%8d:Pos: %4d ",n1,n2,n3,write_count); + sprintf((char*) 
record,"%6d:%4d:%8d:Pos: %4d ",n1,n2,n3,write_count); int4store(record+STANDARD_LENGTH-4,(long) i); fix_length(record,(uint) STANDARD_LENGTH+rnd(60)); put_blob_in_record(record+blob_pos,&blob_buffer); @@ -262,8 +262,8 @@ int main(int argc, char *argv[]) for (j=rnd(1000)+1 ; j>0 && key1[j] == 0 ; j--) ; if (!j) for (j=999 ; j>0 && key1[j] == 0 ; j--) ; - sprintf(key,"%6d",j); - if (mi_rkey(file,read_record,0,key,0,HA_READ_KEY_EXACT)) + sprintf((char*) key,"%6d",j); + if (mi_rkey(file,read_record,0,key,HA_WHOLE_KEY,HA_READ_KEY_EXACT)) { printf("Test in loop: Can't find key: \"%s\"\n",key); goto err; @@ -290,8 +290,8 @@ int main(int argc, char *argv[]) for (j=rnd(1000)+1 ; j>0 && key1[j] == 0 ; j--) ; if (j != 0) { - sprintf(key,"%6d",j); - if (mi_rkey(file,read_record,0,key,0,HA_READ_KEY_EXACT)) + sprintf((char*) key,"%6d",j); + if (mi_rkey(file,read_record,0,key,HA_WHOLE_KEY,HA_READ_KEY_EXACT)) { printf("can't find key1: \"%s\"\n",key); goto err; @@ -304,8 +304,8 @@ int main(int argc, char *argv[]) goto err; } opt_delete++; - key1[atoi(read_record+keyinfo[0].seg[0].start)]--; - key3[atoi(read_record+keyinfo[2].seg[0].start)]=0; + key1[atoi((char*) read_record+keyinfo[0].seg[0].start)]--; + key3[atoi((char*) read_record+keyinfo[2].seg[0].start)]=0; } else puts("Warning: Skipping delete test because no dupplicate keys"); @@ -317,17 +317,17 @@ int main(int argc, char *argv[]) for (i=0 ; i<recant/10 ; i++) { n1=rnd(1000); n2=rnd(100); n3=rnd(5000); - sprintf(record2,"%6d:%4d:%8d:XXX: %4d ",n1,n2,n3,update); + sprintf((char*) record2,"%6d:%4d:%8d:XXX: %4d ",n1,n2,n3,update); int4store(record2+STANDARD_LENGTH-4,(long) i); fix_length(record2,(uint) STANDARD_LENGTH+rnd(60)); for (j=rnd(1000)+1 ; j>0 && key1[j] == 0 ; j--) ; if (j != 0) { - sprintf(key,"%6d",j); - if (mi_rkey(file,read_record,0,key,0,HA_READ_KEY_EXACT)) + sprintf((char*) key,"%6d",j); + if (mi_rkey(file,read_record,0,key,HA_WHOLE_KEY,HA_READ_KEY_EXACT)) { - printf("can't find key1: \"%s\"\n",key); + printf("can't find key1: \"%s\"\n",(char*) key); goto err; } if (use_blob) @@ -350,8 +350,8 @@ int main(int argc, char *argv[]) } else { - key1[atoi(read_record+keyinfo[0].seg[0].start)]--; - key3[atoi(read_record+keyinfo[2].seg[0].start)]=0; + key1[atoi((char*) read_record+keyinfo[0].seg[0].start)]--; + key3[atoi((char*) read_record+keyinfo[2].seg[0].start)]=0; key1[n1]++; key3[n3]=1; update++; } @@ -367,7 +367,7 @@ int main(int argc, char *argv[]) dupp_keys=key1[i]; j=i; } } - sprintf(key,"%6d",j); + sprintf((char*) key,"%6d",j); start=keyinfo[0].seg[0].start; length=keyinfo[0].seg[0].length; if (dupp_keys) @@ -377,7 +377,7 @@ int main(int argc, char *argv[]) DBUG_PRINT("progpos",("first - next -> last - prev -> first")); if (verbose) printf(" Using key: \"%s\" Keys: %d\n",key,dupp_keys); - if (mi_rkey(file,read_record,0,key,0,HA_READ_KEY_EXACT)) + if (mi_rkey(file,read_record,0,key,HA_WHOLE_KEY,HA_READ_KEY_EXACT)) goto err; if (mi_rsame(file,read_record2,-1)) goto err; @@ -422,7 +422,7 @@ int main(int argc, char *argv[]) } /* Check of mi_rnext_same */ - if (mi_rkey(file,read_record,0,key,0,HA_READ_KEY_EXACT)) + if (mi_rkey(file,read_record,0,key,HA_WHOLE_KEY,HA_READ_KEY_EXACT)) goto err; ant=1; while (!mi_rnext_same(file,read_record3) && ant < dupp_keys+10) @@ -455,8 +455,8 @@ int main(int argc, char *argv[]) bcmp(read_record2,read_record3,reclength)) { printf("Can't find last record\n"); - DBUG_DUMP("record2",(byte*) read_record2,reclength); - DBUG_DUMP("record3",(byte*) read_record3,reclength); + DBUG_DUMP("record2",(uchar*) 
read_record2,reclength); + DBUG_DUMP("record3",(uchar*) read_record3,reclength); goto end; } ant=1; @@ -496,7 +496,7 @@ int main(int argc, char *argv[]) goto err; if (bcmp(read_record2,read_record3,reclength)) printf("Can't find last record\n"); - +#ifdef NOT_ANYMORE if (!silent) puts("- Test read key-part"); strmov(key2,key); @@ -514,12 +514,14 @@ int main(int argc, char *argv[]) goto end; } } +#endif if (dupp_keys > 2) { if (!silent) printf("- Read key (first) - next - delete - next -> last\n"); DBUG_PRINT("progpos",("first - next - delete - next -> last")); - if (mi_rkey(file,read_record,0,key,0,HA_READ_KEY_EXACT)) goto err; + if (mi_rkey(file,read_record,0,key,HA_WHOLE_KEY,HA_READ_KEY_EXACT)) + goto err; if (mi_rnext(file,read_record3,0)) goto err; if (mi_delete(file,read_record3)) goto err; opt_delete++; @@ -555,7 +557,8 @@ int main(int argc, char *argv[]) if (!silent) printf("- Read first - delete - next -> last\n"); DBUG_PRINT("progpos",("first - delete - next -> last")); - if (mi_rkey(file,read_record3,0,key,0,HA_READ_KEY_EXACT)) goto err; + if (mi_rkey(file,read_record3,0,key,HA_WHOLE_KEY,HA_READ_KEY_EXACT)) + goto err; if (mi_delete(file,read_record3)) goto err; opt_delete++; ant=1; @@ -618,10 +621,10 @@ int main(int argc, char *argv[]) copy_key(file,(uint) i,(uchar*) read_record,(uchar*) key); copy_key(file,(uint) i,(uchar*) read_record2,(uchar*) key2); min_key.key= key; - min_key.length= USE_WHOLE_KEY; + min_key.keypart_map= HA_WHOLE_KEY; min_key.flag= HA_READ_KEY_EXACT; max_key.key= key2; - max_key.length= USE_WHOLE_KEY; + max_key.keypart_map= HA_WHOLE_KEY; max_key.flag= HA_READ_AFTER_KEY; range_records= mi_records_in_range(file,(int) i, &min_key, &max_key); @@ -649,8 +652,8 @@ int main(int argc, char *argv[]) key_range min_key, max_key; if (j > k) swap_variables(int, j, k); - sprintf(key,"%6d",j); - sprintf(key2,"%6d",k); + sprintf((char*) key,"%6d",j); + sprintf((char*) key2,"%6d",k); min_key.key= key; min_key.length= USE_WHOLE_KEY; @@ -1001,18 +1004,18 @@ static uint rnd(uint max_value) /* Create a variable length record */ -static void fix_length(byte *rec, uint length) +static void fix_length(uchar *rec, uint length) { bmove(rec+STANDARD_LENGTH, "0123456789012345678901234567890123456789012345678901234567890", length-STANDARD_LENGTH); - strfill(rec+length,STANDARD_LENGTH+60-length,' '); + strfill((char*) rec+length,STANDARD_LENGTH+60-length,' '); } /* fix_length */ /* Put maybe a blob in record */ -static void put_blob_in_record(char *blob_pos, char **blob_buffer) +static void put_blob_in_record(uchar *blob_pos, char **blob_buffer) { ulong i,length; if (use_blob) diff --git a/storage/myisam/mi_test3.c b/storage/myisam/mi_test3.c index 3987c20ab69..5bdc33b8518 100644 --- a/storage/myisam/mi_test3.c +++ b/storage/myisam/mi_test3.c @@ -48,9 +48,9 @@ int test_read(MI_INFO *,int),test_write(MI_INFO *,int,int), test_update(MI_INFO *,int,int),test_rrnd(MI_INFO *,int); struct record { - char id[8]; - char nr[4]; - char text[10]; + uchar id[8]; + uchar nr[4]; + uchar text[10]; } record; @@ -243,8 +243,8 @@ int test_read(MI_INFO *file,int id) for (i=0 ; i < 100 ; i++) { find=rnd(100000); - if (!mi_rkey(file,record.id,1,(byte*) &find, - sizeof(find),HA_READ_KEY_EXACT)) + if (!mi_rkey(file,record.id,1,(uchar*) &find, HA_WHOLE_KEY, + HA_READ_KEY_EXACT)) found++; else { @@ -362,8 +362,8 @@ int test_write(MI_INFO *file,int id,int lock_type) mi_extra(file,HA_EXTRA_WRITE_CACHE,0); } - sprintf(record.id,"%7d",getpid()); - strnmov(record.text,"Testing...", sizeof(record.text)); + 
sprintf((char*) record.id,"%7d",getpid()); + strnmov((char*) record.text,"Testing...", sizeof(record.text)); tries=(uint) rnd(100)+10; for (i=count=0 ; i < tries ; i++) @@ -419,15 +419,15 @@ int test_update(MI_INFO *file,int id,int lock_type) } } bzero((char*) &new_record,sizeof(new_record)); - strmov(new_record.text,"Updated"); + strmov((char*) new_record.text,"Updated"); found=next=prev=update=0; for (i=0 ; i < 100 ; i++) { tmp=rnd(100000); int4store(find,tmp); - if (!mi_rkey(file,record.id,1,(byte*) find, - sizeof(find),HA_READ_KEY_EXACT)) + if (!mi_rkey(file,record.id,1,(uchar*) find, HA_WHOLE_KEY, + HA_READ_KEY_EXACT)) found++; else { diff --git a/storage/myisam/mi_unique.c b/storage/myisam/mi_unique.c index 635f6c18247..e490fb683e4 100644 --- a/storage/myisam/mi_unique.c +++ b/storage/myisam/mi_unique.c @@ -18,7 +18,7 @@ #include "myisamdef.h" #include <m_ctype.h> -my_bool mi_check_unique(MI_INFO *info, MI_UNIQUEDEF *def, byte *record, +my_bool mi_check_unique(MI_INFO *info, MI_UNIQUEDEF *def, uchar *record, ha_checksum unique_hash, my_off_t disk_pos) { my_off_t lastpos=info->lastpos; @@ -73,9 +73,9 @@ my_bool mi_check_unique(MI_INFO *info, MI_UNIQUEDEF *def, byte *record, Add support for bit fields */ -ha_checksum mi_unique_hash(MI_UNIQUEDEF *def, const byte *record) +ha_checksum mi_unique_hash(MI_UNIQUEDEF *def, const uchar *record) { - const byte *pos, *end; + const uchar *pos, *end; ha_checksum crc= 0; ulong seed1=0, seed2= 4; HA_KEYSEG *keyseg; @@ -111,7 +111,7 @@ ha_checksum mi_unique_hash(MI_UNIQUEDEF *def, const byte *record) else if (keyseg->flag & HA_BLOB_PART) { uint tmp_length=_mi_calc_blob_length(keyseg->bit_start,pos); - memcpy_fixed((byte*) &pos,pos+keyseg->bit_start,sizeof(char*)); + memcpy_fixed((uchar*) &pos,pos+keyseg->bit_start,sizeof(char*)); if (!length || length > tmp_length) length=tmp_length; /* The whole blob */ } @@ -145,10 +145,10 @@ ha_checksum mi_unique_hash(MI_UNIQUEDEF *def, const byte *record) # Rows are different */ -int mi_unique_comp(MI_UNIQUEDEF *def, const byte *a, const byte *b, +int mi_unique_comp(MI_UNIQUEDEF *def, const uchar *a, const uchar *b, my_bool null_are_equal) { - const byte *pos_a, *pos_b, *end; + const uchar *pos_a, *pos_b, *end; HA_KEYSEG *keyseg; for (keyseg=def->seg ; keyseg < def->end ; keyseg++) @@ -206,8 +206,8 @@ int mi_unique_comp(MI_UNIQUEDEF *def, const byte *a, const byte *b, set_if_smaller(a_length, keyseg->length); set_if_smaller(b_length, keyseg->length); } - memcpy_fixed((byte*) &pos_a,pos_a+keyseg->bit_start,sizeof(char*)); - memcpy_fixed((byte*) &pos_b,pos_b+keyseg->bit_start,sizeof(char*)); + memcpy_fixed((uchar*) &pos_a,pos_a+keyseg->bit_start,sizeof(char*)); + memcpy_fixed((uchar*) &pos_b,pos_b+keyseg->bit_start,sizeof(char*)); } if (type == HA_KEYTYPE_TEXT || type == HA_KEYTYPE_VARTEXT1 || type == HA_KEYTYPE_VARTEXT2) diff --git a/storage/myisam/mi_update.c b/storage/myisam/mi_update.c index bea457d2e9a..956334b7806 100644 --- a/storage/myisam/mi_update.c +++ b/storage/myisam/mi_update.c @@ -18,7 +18,7 @@ #include "fulltext.h" #include "rt_index.h" -int mi_update(register MI_INFO *info, const byte *oldrec, byte *newrec) +int mi_update(register MI_INFO *info, const uchar *oldrec, uchar *newrec) { int flag,key_changed,save_errno; reg3 my_off_t pos; @@ -102,7 +102,7 @@ int mi_update(register MI_INFO *info, const byte *oldrec, byte *newrec) key_changed|=HA_STATE_WRITTEN; } changed|=((ulonglong) 1 << i); - if (_mi_ft_update(info,i,(char*) old_key,oldrec,newrec,pos)) + if (_mi_ft_update(info,i, 
old_key,oldrec,newrec,pos)) goto err; } } @@ -115,7 +115,7 @@ int mi_update(register MI_INFO *info, const byte *oldrec, byte *newrec) info->update&= ~HA_STATE_RNEXT_SAME; if (new_length != old_length || - memcmp((byte*) old_key,(byte*) new_key,new_length)) + memcmp((uchar*) old_key,(uchar*) new_key,new_length)) { if ((int) i == info->lastinx) key_changed|=HA_STATE_WRITTEN; /* Mark that keyfile changed */ @@ -207,8 +207,8 @@ err: { if (share->keyinfo[i].flag & HA_FULLTEXT) { - if ((flag++ && _mi_ft_del(info,i,(char*) new_key,newrec,pos)) || - _mi_ft_add(info,i,(char*) old_key,oldrec,pos)) + if ((flag++ && _mi_ft_del(info,i, new_key,newrec,pos)) || + _mi_ft_add(info,i, old_key,oldrec,pos)) break; } else diff --git a/storage/myisam/mi_write.c b/storage/myisam/mi_write.c index 57c054f2de8..719008d3513 100644 --- a/storage/myisam/mi_write.c +++ b/storage/myisam/mi_write.c @@ -40,7 +40,7 @@ int _mi_ck_write_btree(register MI_INFO *info, uint keynr,uchar *key, /* Write new record to database */ -int mi_write(MI_INFO *info, byte *record) +int mi_write(MI_INFO *info, uchar *record) { MYISAM_SHARE *share=info->s; uint i; @@ -112,7 +112,7 @@ int mi_write(MI_INFO *info, byte *record) } if (share->keyinfo[i].flag & HA_FULLTEXT ) { - if (_mi_ft_add(info,i,(char*) buff,record,filepos)) + if (_mi_ft_add(info,i, buff, record, filepos)) { if (local_lock_tree) rw_unlock(&share->key_root_lock[i]); @@ -200,7 +200,7 @@ err: rw_wrlock(&share->key_root_lock[i]); if (share->keyinfo[i].flag & HA_FULLTEXT) { - if (_mi_ft_del(info,i,(char*) buff,record,filepos)) + if (_mi_ft_del(info,i, buff,record,filepos)) { if (local_lock_tree) rw_unlock(&share->key_root_lock[i]); @@ -286,7 +286,7 @@ int _mi_ck_write_btree(register MI_INFO *info, uint keynr, uchar *key, if (!error) error= _mi_ft_convert_to_ft2(info, keynr, key); delete_dynamic(info->ft1_to_ft2); - my_free((gptr)info->ft1_to_ft2, MYF(0)); + my_free((uchar*)info->ft1_to_ft2, MYF(0)); info->ft1_to_ft2=0; } DBUG_RETURN(error); @@ -403,14 +403,14 @@ static int w_search(register MI_INFO *info, register MI_KEYDEF *keyinfo, ft_intXstore(keypos, subkeys); if (!error) error=_mi_write_keypage(info,keyinfo,page,DFLT_INIT_HITS,temp_buff); - my_afree((byte*) temp_buff); + my_afree((uchar*) temp_buff); DBUG_RETURN(error); } } else /* not HA_FULLTEXT, normal HA_NOSAME key */ { info->dupp_key_pos= dupp_key_pos; - my_afree((byte*) temp_buff); + my_afree((uchar*) temp_buff); my_errno=HA_ERR_FOUND_DUPP_KEY; DBUG_RETURN(-1); } @@ -429,10 +429,10 @@ static int w_search(register MI_INFO *info, register MI_KEYDEF *keyinfo, if (_mi_write_keypage(info,keyinfo,page,DFLT_INIT_HITS,temp_buff)) goto err; } - my_afree((byte*) temp_buff); + my_afree((uchar*) temp_buff); DBUG_RETURN(error); err: - my_afree((byte*) temp_buff); + my_afree((uchar*) temp_buff); DBUG_PRINT("exit",("Error: %d",my_errno)); DBUG_RETURN (-1); } /* w_search */ @@ -488,7 +488,7 @@ int _mi_insert(register MI_INFO *info, register MI_KEYDEF *keyinfo, if (key_pos != anc_buff+2+nod_flag && (keyinfo->flag & (HA_BINARY_PACK_KEY | HA_PACK_KEY))) { - DBUG_DUMP("prev_key",(byte*) key_buff,_mi_keylength(keyinfo,key_buff)); + DBUG_DUMP("prev_key",(uchar*) key_buff,_mi_keylength(keyinfo,key_buff)); } if (keyinfo->flag & HA_PACK_KEY) { @@ -506,7 +506,7 @@ int _mi_insert(register MI_INFO *info, register MI_KEYDEF *keyinfo, my_errno=HA_ERR_CRASHED; DBUG_RETURN(-1); } - bmove_upp((byte*) endpos+t_length,(byte*) endpos,(uint) (endpos-key_pos)); + bmove_upp((uchar*) endpos+t_length,(uchar*) endpos,(uint) (endpos-key_pos)); } else { @@ -562,7 
+562,7 @@ int _mi_insert(register MI_INFO *info, register MI_KEYDEF *keyinfo, we cannot easily dispatch an empty page here */ b+=blen+ft2len+2; for (a=anc_buff+a_length ; b < a ; b+=ft2len+2) - insert_dynamic(info->ft1_to_ft2, (char*) b); + insert_dynamic(info->ft1_to_ft2, b); /* fixing the page's length - it contains only one key now */ mi_putint(anc_buff,2+blen+ft2len+2,0); @@ -595,7 +595,7 @@ int _mi_split_page(register MI_INFO *info, register MI_KEYDEF *keyinfo, MI_KEY_PARAM s_temp; DBUG_ENTER("mi_split_page"); LINT_INIT(after_key); - DBUG_DUMP("buff",(byte*) buff,mi_getint(buff)); + DBUG_DUMP("buff",(uchar*) buff,mi_getint(buff)); if (info->s->keyinfo+info->lastinx == keyinfo) info->page_changed=1; /* Info->buff is used */ @@ -619,7 +619,7 @@ int _mi_split_page(register MI_INFO *info, register MI_KEYDEF *keyinfo, { DBUG_PRINT("test",("Splitting nod")); pos=key_pos-nod_flag; - memcpy((byte*) info->buff+2,(byte*) pos,(size_t) nod_flag); + memcpy((uchar*) info->buff+2,(uchar*) pos,(size_t) nod_flag); } /* Move middle item to key and pointer to new page */ @@ -635,14 +635,14 @@ int _mi_split_page(register MI_INFO *info, register MI_KEYDEF *keyinfo, (uchar*) 0, (uchar*) 0, key_buff, &s_temp); length=(uint) ((buff+a_length)-key_pos); - memcpy((byte*) info->buff+key_ref_length+t_length,(byte*) key_pos, + memcpy((uchar*) info->buff+key_ref_length+t_length,(uchar*) key_pos, (size_t) length); (*keyinfo->store_key)(keyinfo,info->buff+key_ref_length,&s_temp); mi_putint(info->buff,length+t_length+key_ref_length,nod_flag); if (_mi_write_keypage(info,keyinfo,new_pos,DFLT_INIT_HITS,info->buff)) DBUG_RETURN(-1); - DBUG_DUMP("key",(byte*) key,_mi_keylength(keyinfo,key)); + DBUG_DUMP("key",(uchar*) key,_mi_keylength(keyinfo,key)); DBUG_RETURN(2); /* Middle key up */ } /* _mi_split_page */ @@ -764,7 +764,7 @@ static int _mi_balance_page(register MI_INFO *info, MI_KEYDEF *keyinfo, length,keys; uchar *pos,*buff,*extra_buff; my_off_t next_page,new_pos; - byte tmp_part_key[MI_MAX_KEY_BUFF]; + uchar tmp_part_key[MI_MAX_KEY_BUFF]; DBUG_ENTER("_mi_balance_page"); k_length=keyinfo->keylength; @@ -796,7 +796,7 @@ static int _mi_balance_page(register MI_INFO *info, MI_KEYDEF *keyinfo, if (!_mi_fetch_keypage(info,keyinfo,next_page,DFLT_INIT_HITS,info->buff,0)) goto err; - DBUG_DUMP("next",(byte*) info->buff,mi_getint(info->buff)); + DBUG_DUMP("next",(uchar*) info->buff,mi_getint(info->buff)); /* Test if there is room to share keys */ @@ -815,23 +815,23 @@ static int _mi_balance_page(register MI_INFO *info, MI_KEYDEF *keyinfo, if (left_length < new_left_length) { /* Move keys buff -> leaf */ pos=curr_buff+left_length; - memcpy((byte*) pos,(byte*) father_key_pos, (size_t) k_length); - memcpy((byte*) pos+k_length, (byte*) buff+2, + memcpy((uchar*) pos,(uchar*) father_key_pos, (size_t) k_length); + memcpy((uchar*) pos+k_length, (uchar*) buff+2, (size_t) (length=new_left_length - left_length - k_length)); pos=buff+2+length; - memcpy((byte*) father_key_pos,(byte*) pos,(size_t) k_length); - bmove((byte*) buff+2,(byte*) pos+k_length,new_right_length); + memcpy((uchar*) father_key_pos,(uchar*) pos,(size_t) k_length); + bmove((uchar*) buff+2,(uchar*) pos+k_length,new_right_length); } else { /* Move keys -> buff */ - bmove_upp((byte*) buff+new_right_length,(byte*) buff+right_length, + bmove_upp((uchar*) buff+new_right_length,(uchar*) buff+right_length, right_length-2); length=new_right_length-right_length-k_length; - memcpy((byte*) buff+2+length,father_key_pos,(size_t) k_length); + memcpy((uchar*) 
buff+2+length,father_key_pos,(size_t) k_length); pos=curr_buff+new_left_length; - memcpy((byte*) father_key_pos,(byte*) pos,(size_t) k_length); - memcpy((byte*) buff+2,(byte*) pos+k_length,(size_t) length); + memcpy((uchar*) father_key_pos,(uchar*) pos,(size_t) k_length); + memcpy((uchar*) buff+2,(uchar*) pos+k_length,(size_t) length); } if (_mi_write_keypage(info,keyinfo,next_page,DFLT_INIT_HITS,info->buff) || @@ -858,22 +858,22 @@ static int _mi_balance_page(register MI_INFO *info, MI_KEYDEF *keyinfo, /* move first largest keys to new page */ pos=buff+right_length-extra_length; - memcpy((byte*) extra_buff+2,pos,(size_t) extra_length); + memcpy((uchar*) extra_buff+2,pos,(size_t) extra_length); /* Save new parting key */ memcpy(tmp_part_key, pos-k_length,k_length); /* Make place for new keys */ - bmove_upp((byte*) buff+new_right_length,(byte*) pos-k_length, + bmove_upp((uchar*) buff+new_right_length,(uchar*) pos-k_length, right_length-extra_length-k_length-2); /* Copy keys from left page */ pos= curr_buff+new_left_length; - memcpy((byte*) buff+2,(byte*) pos+k_length, + memcpy((uchar*) buff+2,(uchar*) pos+k_length, (size_t) (length=left_length-new_left_length-k_length)); /* Copy old parting key */ - memcpy((byte*) buff+2+length,father_key_pos,(size_t) k_length); + memcpy((uchar*) buff+2+length,father_key_pos,(size_t) k_length); /* Move new parting keys up to caller */ - memcpy((byte*) (right ? key : father_key_pos),pos,(size_t) k_length); - memcpy((byte*) (right ? father_key_pos : key),tmp_part_key, k_length); + memcpy((uchar*) (right ? key : father_key_pos),pos,(size_t) k_length); + memcpy((uchar*) (right ? father_key_pos : key),tmp_part_key, k_length); if ((new_pos=_mi_new(info,keyinfo,DFLT_INIT_HITS)) == HA_OFFSET_ERROR) goto err; diff --git a/storage/myisam/myisam_ftdump.c b/storage/myisam/myisam_ftdump.c index 4bc1833cca6..63d954242a0 100644 --- a/storage/myisam/myisam_ftdump.c +++ b/storage/myisam/myisam_ftdump.c @@ -46,7 +46,7 @@ static struct my_option my_long_options[] = {"stats", 's', "Report global stats.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"verbose", 'v', "Be verbose.", - (gptr*) &verbose, (gptr*) &verbose, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, + (uchar**) &verbose, (uchar**) &verbose, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} }; diff --git a/storage/myisam/myisamchk.c b/storage/myisam/myisamchk.c index bb4ae9a97ec..567e1057e5d 100644 --- a/storage/myisam/myisamchk.c +++ b/storage/myisam/myisamchk.c @@ -68,9 +68,9 @@ static void get_options(int *argc,char * * *argv); static void print_version(void); static void usage(void); static int myisamchk(MI_CHECK *param, char *filename); -static void descript(MI_CHECK *param, register MI_INFO *info, my_string name); +static void descript(MI_CHECK *param, register MI_INFO *info, char * name); static int mi_sort_records(MI_CHECK *param, register MI_INFO *info, - my_string name, uint sort_key, + char * name, uint sort_key, my_bool write_info, my_bool update_index); static int sort_record_index(MI_SORT_PARAM *sort_param, MI_INFO *info, MI_KEYDEF *keyinfo, @@ -167,7 +167,7 @@ static struct my_option my_long_options[] = 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"character-sets-dir", OPT_CHARSETS_DIR, "Directory where character sets are.", - (gptr*) &charsets_dir, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + (uchar**) &charsets_dir, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"check", 'c', "Check table for errors.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 
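
Editor's note: _mi_balance_page() above redistributes keys between two sibling index pages through the parent's parting key: keys flow left or right, and whatever key lands on the boundary replaces the separator in the parent. A toy rotation with fixed-length keys, assuming plain byte arrays rather than real MyISAM page buffers:

    #include <string.h>

    /* The last key of 'left' moves up into the parent separator, and
       the old separator moves down to become the first key of 'right'.
       k_len is the fixed key length; n_left/n_right count keys. */
    static void rotate_right(unsigned char *left,  size_t *n_left,
                             unsigned char *right, size_t *n_right,
                             unsigned char *parent_sep, size_t k_len)
    {
      memmove(right + k_len, right, *n_right * k_len);  /* room in front */
      memcpy(right, parent_sep, k_len);            /* separator moves down */
      memcpy(parent_sep, left + (*n_left - 1) * k_len, k_len); /* new sep */
      --*n_left;
      ++*n_right;
    }
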
0, 0}, @@ -187,8 +187,8 @@ static struct my_option my_long_options[] = 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"data-file-length", 'D', "Max length of data file (when recreating data-file when it's full).", - (gptr*) &check_param.max_data_file_length, - (gptr*) &check_param.max_data_file_length, + (uchar**) &check_param.max_data_file_length, + (uchar**) &check_param.max_data_file_length, 0, GET_LL, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"extend-check", 'e', "If used when checking a table, ensure that the table is 100 percent consistent, which will take a long time. If used when repairing a table, try to recover every possible row from the data file. Normally this will also find a lot of garbage rows; Don't use this option with repair if you are not totally desperate.", @@ -210,13 +210,13 @@ static struct my_option my_long_options[] = 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"keys-used", 'k', "Tell MyISAM to update only some specific keys. # is a bit mask of which keys to use. This can be used to get faster inserts.", - (gptr*) &check_param.keys_in_use, - (gptr*) &check_param.keys_in_use, + (uchar**) &check_param.keys_in_use, + (uchar**) &check_param.keys_in_use, 0, GET_ULL, REQUIRED_ARG, -1, 0, 0, 0, 0, 0}, {"max-record-length", OPT_MAX_RECORD_LENGTH, "Skip rows bigger than this if myisamchk can't allocate memory to hold it", - (gptr*) &check_param.max_record_length, - (gptr*) &check_param.max_record_length, + (uchar**) &check_param.max_record_length, + (uchar**) &check_param.max_record_length, 0, GET_ULL, REQUIRED_ARG, LONGLONG_MAX, 0, LONGLONG_MAX, 0, 0, 0}, {"medium-check", 'm', "Faster than extend-check, but only finds 99.99% of all errors. Should be good enough for most cases.", @@ -245,12 +245,12 @@ static struct my_option my_long_options[] = #endif {"set-auto-increment", 'A', "Force auto_increment to start at this or higher value. If no value is given, then sets the next auto_increment value to the highest used value for the auto key + 1.", - (gptr*) &check_param.auto_increment_value, - (gptr*) &check_param.auto_increment_value, + (uchar**) &check_param.auto_increment_value, + (uchar**) &check_param.auto_increment_value, 0, GET_ULL, OPT_ARG, 0, 0, 0, 0, 0, 0}, {"set-collation", OPT_SET_COLLATION, "Change the collation used by the index", - (gptr*) &set_collation_name, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + (uchar**) &set_collation_name, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"set-variable", 'O', "Change the value of a variable. Please note that this option is deprecated; you can set variables directly with --variable-name=value.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, @@ -262,12 +262,12 @@ static struct my_option my_long_options[] = 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"sort-records", 'R', "Sort records according to an index. This makes your data much more localized and may speed up things. 
(It may be VERY slow to do a sort the first time!)", - (gptr*) &check_param.opt_sort_key, - (gptr*) &check_param.opt_sort_key, + (uchar**) &check_param.opt_sort_key, + (uchar**) &check_param.opt_sort_key, 0, GET_UINT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"tmpdir", 't', "Path for temporary files.", - (gptr*) &opt_tmpdir, + (uchar**) &opt_tmpdir, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"update-state", 'U', "Mark tables as crashed if any errors were found.", @@ -285,54 +285,54 @@ static struct my_option my_long_options[] = "Wait if table is locked.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, { "key_buffer_size", OPT_KEY_BUFFER_SIZE, "", - (gptr*) &check_param.use_buffers, (gptr*) &check_param.use_buffers, 0, + (uchar**) &check_param.use_buffers, (uchar**) &check_param.use_buffers, 0, GET_ULONG, REQUIRED_ARG, (long) USE_BUFFER_INIT, (long) MALLOC_OVERHEAD, (long) ~0L, (long) MALLOC_OVERHEAD, (long) IO_SIZE, 0}, { "key_cache_block_size", OPT_KEY_CACHE_BLOCK_SIZE, "", - (gptr*) &opt_key_cache_block_size, - (gptr*) &opt_key_cache_block_size, 0, + (uchar**) &opt_key_cache_block_size, + (uchar**) &opt_key_cache_block_size, 0, GET_LONG, REQUIRED_ARG, MI_KEY_BLOCK_LENGTH, MI_MIN_KEY_BLOCK_LENGTH, MI_MAX_KEY_BLOCK_LENGTH, 0, MI_MIN_KEY_BLOCK_LENGTH, 0}, { "myisam_block_size", OPT_MYISAM_BLOCK_SIZE, "", - (gptr*) &opt_myisam_block_size, (gptr*) &opt_myisam_block_size, 0, + (uchar**) &opt_myisam_block_size, (uchar**) &opt_myisam_block_size, 0, GET_LONG, REQUIRED_ARG, MI_KEY_BLOCK_LENGTH, MI_MIN_KEY_BLOCK_LENGTH, MI_MAX_KEY_BLOCK_LENGTH, 0, MI_MIN_KEY_BLOCK_LENGTH, 0}, { "read_buffer_size", OPT_READ_BUFFER_SIZE, "", - (gptr*) &check_param.read_buffer_length, - (gptr*) &check_param.read_buffer_length, 0, GET_ULONG, REQUIRED_ARG, + (uchar**) &check_param.read_buffer_length, + (uchar**) &check_param.read_buffer_length, 0, GET_ULONG, REQUIRED_ARG, (long) READ_BUFFER_INIT, (long) MALLOC_OVERHEAD, (long) ~0L, (long) MALLOC_OVERHEAD, (long) 1L, 0}, { "write_buffer_size", OPT_WRITE_BUFFER_SIZE, "", - (gptr*) &check_param.write_buffer_length, - (gptr*) &check_param.write_buffer_length, 0, GET_ULONG, REQUIRED_ARG, + (uchar**) &check_param.write_buffer_length, + (uchar**) &check_param.write_buffer_length, 0, GET_ULONG, REQUIRED_ARG, (long) READ_BUFFER_INIT, (long) MALLOC_OVERHEAD, (long) ~0L, (long) MALLOC_OVERHEAD, (long) 1L, 0}, { "sort_buffer_size", OPT_SORT_BUFFER_SIZE, "", - (gptr*) &check_param.sort_buffer_length, - (gptr*) &check_param.sort_buffer_length, 0, GET_ULONG, REQUIRED_ARG, + (uchar**) &check_param.sort_buffer_length, + (uchar**) &check_param.sort_buffer_length, 0, GET_ULONG, REQUIRED_ARG, (long) SORT_BUFFER_INIT, (long) (MIN_SORT_BUFFER + MALLOC_OVERHEAD), (long) ~0L, (long) MALLOC_OVERHEAD, (long) 1L, 0}, { "sort_key_blocks", OPT_SORT_KEY_BLOCKS, "", - (gptr*) &check_param.sort_key_blocks, - (gptr*) &check_param.sort_key_blocks, 0, GET_ULONG, REQUIRED_ARG, + (uchar**) &check_param.sort_key_blocks, + (uchar**) &check_param.sort_key_blocks, 0, GET_ULONG, REQUIRED_ARG, BUFFERS_WHEN_SORTING, 4L, 100L, 0L, 1L, 0}, - { "decode_bits", OPT_DECODE_BITS, "", (gptr*) &decode_bits, - (gptr*) &decode_bits, 0, GET_UINT, REQUIRED_ARG, 9L, 4L, 17L, 0L, 1L, 0}, - { "ft_min_word_len", OPT_FT_MIN_WORD_LEN, "", (gptr*) &ft_min_word_len, - (gptr*) &ft_min_word_len, 0, GET_ULONG, REQUIRED_ARG, 4, 1, HA_FT_MAXCHARLEN, + { "decode_bits", OPT_DECODE_BITS, "", (uchar**) &decode_bits, + (uchar**) &decode_bits, 0, GET_UINT, REQUIRED_ARG, 9L, 4L, 17L, 0L, 1L, 0}, + { "ft_min_word_len", OPT_FT_MIN_WORD_LEN, "", 
(uchar**) &ft_min_word_len, + (uchar**) &ft_min_word_len, 0, GET_ULONG, REQUIRED_ARG, 4, 1, HA_FT_MAXCHARLEN, 0, 1, 0}, - { "ft_max_word_len", OPT_FT_MAX_WORD_LEN, "", (gptr*) &ft_max_word_len, - (gptr*) &ft_max_word_len, 0, GET_ULONG, REQUIRED_ARG, HA_FT_MAXCHARLEN, 10, + { "ft_max_word_len", OPT_FT_MAX_WORD_LEN, "", (uchar**) &ft_max_word_len, + (uchar**) &ft_max_word_len, 0, GET_ULONG, REQUIRED_ARG, HA_FT_MAXCHARLEN, 10, HA_FT_MAXCHARLEN, 0, 1, 0}, { "ft_stopword_file", OPT_FT_STOPWORD_FILE, "Use stopwords from this file instead of built-in list.", - (gptr*) &ft_stopword_file, (gptr*) &ft_stopword_file, 0, GET_STR, + (uchar**) &ft_stopword_file, (uchar**) &ft_stopword_file, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"stats_method", OPT_STATS_METHOD, - "Specifies how index statistics collection code should threat NULLs. " + "Specifies how index statistics collection code should treat NULLs. " "Possible values of name are \"nulls_unequal\" (default behavior for 4.1/5.0), " "\"nulls_equal\" (emulate 4.0 behavior), and \"nulls_ignored\".", - (gptr*) &myisam_stats_method_str, (gptr*) &myisam_stats_method_str, 0, + (uchar**) &myisam_stats_method_str, (uchar**) &myisam_stats_method_str, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} }; @@ -444,7 +444,7 @@ static void usage(void) MySQL faster. You can check the calculated distribution\n\ by using '--description --verbose table_name'.\n\ --stats_method=name Specifies how index statistics collection code should\n\ - threat NULLs. Possible values of name are \"nulls_unequal\"\n\ + treat NULLs. Possible values of name are \"nulls_unequal\"\n\ (default for 4.1/5.0), \"nulls_equal\" (emulate 4.0), and \n\ \"nulls_ignored\".\n\ -d, --description Prints some information about table.\n\ @@ -713,6 +713,7 @@ get_one_option(int optid, case 2: method_conv= MI_STATS_METHOD_IGNORE_NULLS; break; + default: assert(0); /* Impossible */ } check_param.stats_method= method_conv; break; @@ -793,7 +794,7 @@ static void get_options(register int *argc,register char ***argv) /* Check table */ -static int myisamchk(MI_CHECK *param, my_string filename) +static int myisamchk(MI_CHECK *param, char * filename) { int error,lock_type,recreate; int rep_quick= param->testflag & (T_QUICK | T_FORCE_UNIQUENESS); @@ -1198,7 +1199,7 @@ end2: /* Write info about table */ -static void descript(MI_CHECK *param, register MI_INFO *info, my_string name) +static void descript(MI_CHECK *param, register MI_INFO *info, char * name) { uint key,keyseg_nr,field,start; reg3 MI_KEYDEF *keyinfo; @@ -1464,7 +1465,7 @@ static void descript(MI_CHECK *param, register MI_INFO *info, my_string name) /* Sort records according to one key */ static int mi_sort_records(MI_CHECK *param, - register MI_INFO *info, my_string name, + register MI_INFO *info, char * name, uint sort_key, my_bool write_info, my_bool update_index) @@ -1535,7 +1536,7 @@ static int mi_sort_records(MI_CHECK *param, mi_check_print_error(param,"Not enough memory for key block"); goto err; } - if (!(sort_param.record=(byte*) my_malloc((uint) share->base.pack_reclength, + if (!(sort_param.record=(uchar*) my_malloc((uint) share->base.pack_reclength, MYF(0)))) { mi_check_print_error(param,"Not enough memory for record"); @@ -1566,7 +1567,7 @@ static int mi_sort_records(MI_CHECK *param, for (key=0 ; key < share->base.keys ; key++) share->keyinfo[key].flag|= HA_SORT_ALLOWS_SAME; - if (my_pread(share->kfile,(byte*) temp_buff, + if (my_pread(share->kfile,(uchar*) temp_buff, (uint) 
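
Editor's note: the repeated (gptr*) -> (uchar**) edits in the option tables above are purely mechanical: gptr was the old typedef for a plain char pointer, and the my_getopt interface now takes uchar** for the value and max-value slots. For orientation, here is one entry from the table above with the field meanings annotated; the meanings are inferred from the values used in this file, not quoted from my_getopt.h:

    { "key_buffer_size", OPT_KEY_BUFFER_SIZE, "", /* name, id, help text */
      (uchar**) &check_param.use_buffers,  /* where the parsed value lands */
      (uchar**) &check_param.use_buffers,  /* max-value slot               */
      0,                                   /* typelib, for enum options    */
      GET_ULONG, REQUIRED_ARG,             /* value type; arg is required  */
      (long) USE_BUFFER_INIT,              /* default value                */
      (long) MALLOC_OVERHEAD,              /* minimum                      */
      (long) ~0L,                          /* maximum                      */
      (long) MALLOC_OVERHEAD,              /* subtracted from user input   */
      (long) IO_SIZE, 0},                  /* rounding block size;
                                              app-specific tag             */
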
keyinfo->block_length, share->state.key_root[sort_key], MYF(MY_NABP+MY_WME))) @@ -1629,7 +1630,7 @@ err: } if (temp_buff) { - my_afree((gptr) temp_buff); + my_afree((uchar*) temp_buff); } my_free(sort_param.record,MYF(MY_ALLOW_ZERO_PTR)); info->opt_flag&= ~(READ_CACHE_USED | WRITE_CACHE_USED); @@ -1678,7 +1679,7 @@ static int sort_record_index(MI_SORT_PARAM *sort_param,MI_INFO *info, if (nod_flag) { next_page=_mi_kpos(nod_flag,keypos); - if (my_pread(info->s->kfile,(byte*) temp_buff, + if (my_pread(info->s->kfile,(uchar*) temp_buff, (uint) keyinfo->block_length, next_page, MYF(MY_NABP+MY_WME))) { @@ -1717,19 +1718,19 @@ static int sort_record_index(MI_SORT_PARAM *sort_param,MI_INFO *info, goto err; } /* Clear end of block to get better compression if the table is backuped */ - bzero((byte*) buff+used_length,keyinfo->block_length-used_length); - if (my_pwrite(info->s->kfile,(byte*) buff,(uint) keyinfo->block_length, + bzero((uchar*) buff+used_length,keyinfo->block_length-used_length); + if (my_pwrite(info->s->kfile,(uchar*) buff,(uint) keyinfo->block_length, page,param->myf_rw)) { mi_check_print_error(param,"%d when updating keyblock",my_errno); goto err; } if (temp_buff) - my_afree((gptr) temp_buff); + my_afree((uchar*) temp_buff); DBUG_RETURN(0); err: if (temp_buff) - my_afree((gptr) temp_buff); + my_afree((uchar*) temp_buff); DBUG_RETURN(1); } /* sort_record_index */ diff --git a/storage/myisam/myisamdef.h b/storage/myisam/myisamdef.h index dceccd10ae2..721d6b9f271 100644 --- a/storage/myisam/myisamdef.h +++ b/storage/myisam/myisamdef.h @@ -167,22 +167,22 @@ typedef struct st_mi_isam_share { /* Shared between opens */ char *unique_file_name; /* realpath() of index file */ char *data_file_name, /* Resolved path names from symlinks */ *index_file_name; - byte *file_map; /* mem-map of file if possible */ + uchar *file_map; /* mem-map of file if possible */ KEY_CACHE *key_cache; /* ref to the current key cache */ MI_DECODE_TREE *decode_trees; uint16 *decode_tables; - int (*read_record)(struct st_myisam_info*, my_off_t, byte*); - int (*write_record)(struct st_myisam_info*, const byte*); - int (*update_record)(struct st_myisam_info*, my_off_t, const byte*); + int (*read_record)(struct st_myisam_info*, my_off_t, uchar*); + int (*write_record)(struct st_myisam_info*, const uchar*); + int (*update_record)(struct st_myisam_info*, my_off_t, const uchar*); int (*delete_record)(struct st_myisam_info*); - int (*read_rnd)(struct st_myisam_info*, byte*, my_off_t, my_bool); - int (*compare_record)(struct st_myisam_info*, const byte *); + int (*read_rnd)(struct st_myisam_info*, uchar*, my_off_t, my_bool); + int (*compare_record)(struct st_myisam_info*, const uchar *); /* Function to use for a row checksum. 
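
Editor's note: MYISAM_SHARE above routes every row operation through function pointers (read_record, write_record, file_read, ...), chosen once at open time according to the row format, which is why this signature change fans out across so many files. The pattern, reduced to a sketch with hypothetical types that stand in for the real MI_INFO/MYISAM_SHARE layout:

    #include <stddef.h>

    typedef unsigned char      uchar;      /* as in the header above */
    typedef unsigned long long my_off_t;   /* stand-in for the real typedef */

    struct share {                         /* hypothetical */
      int (*read_record)(struct share *, my_off_t, uchar *);
      int (*write_record)(struct share *, const uchar *);
    };

    /* One implementation per row format; declarations only here. */
    int read_static_row(struct share *, my_off_t, uchar *);
    int read_dynamic_row(struct share *, my_off_t, uchar *);

    /* Bound once when the table is opened; every later row operation
       goes through the pointer without caring about the format. */
    static void bind_row_format(struct share *s, int variable_length_rows)
    {
      s->read_record = variable_length_rows ? read_dynamic_row
                                            : read_static_row;
      /* ... write_record, update_record, file_read bound the same way */
    }
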
*/ - ha_checksum (*calc_checksum)(struct st_myisam_info*, const byte *); + ha_checksum (*calc_checksum)(struct st_myisam_info*, const uchar *); int (*compare_unique)(struct st_myisam_info*, MI_UNIQUEDEF *, - const byte *record, my_off_t pos); - uint (*file_read)(MI_INFO *, byte *, uint, my_off_t, myf); - uint (*file_write)(MI_INFO *, byte *, uint, my_off_t, myf); + const uchar *record, my_off_t pos); + size_t (*file_read)(MI_INFO *, uchar *, size_t, my_off_t, myf); + size_t (*file_write)(MI_INFO *, const uchar *, size_t, my_off_t, myf); invalidator_by_filename invalidator; /* query cache invalidator */ ulong this_process; /* processid */ ulong last_process; /* For table-change-check */ @@ -245,12 +245,12 @@ struct st_myisam_info { uchar *buff, /* Temp area for key */ *lastkey,*lastkey2; /* Last used search key */ uchar *first_mbr_key; /* Searhed spatial key */ - byte *rec_buff; /* Tempbuff for recordpack */ + uchar *rec_buff; /* Tempbuff for recordpack */ uchar *int_keypos, /* Save position for next/previous */ *int_maxpos; /* -""- */ uint int_nod_flag; /* -""- */ uint32 int_keytree_version; /* -""- */ - int (*read_record)(struct st_myisam_info*, my_off_t, byte*); + int (*read_record)(struct st_myisam_info*, my_off_t, uchar*); invalidator_by_filename invalidator; /* query cache invalidator */ ulong this_unique; /* uniq filenumber or thread */ ulong last_unique; /* last unique number */ @@ -276,6 +276,7 @@ struct st_myisam_info { enum ha_rkey_function last_key_func; /* CONTAIN, OVERLAP, etc */ uint save_lastkey_length; uint pack_key_length; /* For MYISAMMRG */ + uint16 last_used_keyseg; /* For MyISAMMRG */ int errkey; /* Got last error on this key */ int lock_type; /* How database was locked */ int tmp_lock_type; /* When locked by readinfo */ @@ -333,10 +334,10 @@ typedef struct st_mi_sort_param HA_KEYSEG *seg; SORT_INFO *sort_info; uchar **sort_keys; - byte *rec_buff; + uchar *rec_buff; void *wordlist, *wordptr; MEM_ROOT wordroot; - char *record; + uchar *record; MY_TMPDIR *tmpdir; int (*key_cmp)(struct st_mi_sort_param *, const void *, const void *); int (*key_read)(struct st_mi_sort_param *,void *); @@ -345,9 +346,10 @@ typedef struct st_mi_sort_param NEAR int (*write_keys)(struct st_mi_sort_param *, register uchar **, uint , struct st_buffpek *, IO_CACHE *); NEAR uint (*read_to_buffer)(IO_CACHE *,struct st_buffpek *, uint); - NEAR int (*write_key)(struct st_mi_sort_param *, IO_CACHE *,char *, + NEAR int (*write_key)(struct st_mi_sort_param *, IO_CACHE *,uchar *, uint, uint); } MI_SORT_PARAM; + /* Some defines used by isam-funktions */ #define USE_WHOLE_KEY MI_MAX_KEY_BUFF*2 /* Use whole key in _mi_search() */ @@ -496,20 +498,20 @@ typedef struct st_mi_s_param /* Prototypes for intern functions */ -extern int _mi_read_dynamic_record(MI_INFO *info,my_off_t filepos,byte *buf); -extern int _mi_write_dynamic_record(MI_INFO*, const byte*); -extern int _mi_update_dynamic_record(MI_INFO*, my_off_t, const byte*); +extern int _mi_read_dynamic_record(MI_INFO *info,my_off_t filepos,uchar *buf); +extern int _mi_write_dynamic_record(MI_INFO*, const uchar*); +extern int _mi_update_dynamic_record(MI_INFO*, my_off_t, const uchar*); extern int _mi_delete_dynamic_record(MI_INFO *info); -extern int _mi_cmp_dynamic_record(MI_INFO *info,const byte *record); -extern int _mi_read_rnd_dynamic_record(MI_INFO *, byte *,my_off_t, my_bool); -extern int _mi_write_blob_record(MI_INFO*, const byte*); -extern int _mi_update_blob_record(MI_INFO*, my_off_t, const byte*); -extern int _mi_read_static_record(MI_INFO *info, 
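
Editor's note: much of this header hunk is the same one-for-one byte -> uchar substitution. The reason it matters in an index engine: char is signed on many platforms, so bytes at or above 0x80 compare as negative and would break byte-wise key ordering anywhere a comparison does not go through memcmp(). A self-contained demonstration:

    #include <stdio.h>

    int main(void)
    {
      char          sc = (char) 0x80; /* usually -128 where char is signed */
      unsigned char uc = 0x80;        /* always 128 */

      /* Byte-wise key order needs 0x80 to sort after 0x7f, as memcmp()
         and unsigned compares do; a signed char gets it backwards. */
      printf("signed char:   0x80 %s 0x7f\n", sc < (char) 0x7f ? "<" : ">=");
      printf("unsigned char: 0x80 %s 0x7f\n", uc < 0x7f        ? "<" : ">=");
      return 0;
    }
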
my_off_t filepos,byte *buf); -extern int _mi_write_static_record(MI_INFO*, const byte*); -extern int _mi_update_static_record(MI_INFO*, my_off_t, const byte*); +extern int _mi_cmp_dynamic_record(MI_INFO *info,const uchar *record); +extern int _mi_read_rnd_dynamic_record(MI_INFO *, uchar *,my_off_t, my_bool); +extern int _mi_write_blob_record(MI_INFO*, const uchar*); +extern int _mi_update_blob_record(MI_INFO*, my_off_t, const uchar*); +extern int _mi_read_static_record(MI_INFO *info, my_off_t filepos,uchar *buf); +extern int _mi_write_static_record(MI_INFO*, const uchar*); +extern int _mi_update_static_record(MI_INFO*, my_off_t, const uchar*); extern int _mi_delete_static_record(MI_INFO *info); -extern int _mi_cmp_static_record(MI_INFO *info,const byte *record); -extern int _mi_read_rnd_static_record(MI_INFO*, byte *,my_off_t, my_bool); +extern int _mi_cmp_static_record(MI_INFO *info,const uchar *record); +extern int _mi_read_rnd_static_record(MI_INFO*, uchar *,my_off_t, my_bool); extern int _mi_ck_write(MI_INFO *info,uint keynr,uchar *key,uint length); extern int _mi_ck_real_write_btree(MI_INFO *info, MI_KEYDEF *keyinfo, uchar *key, uint key_length, @@ -603,38 +605,39 @@ extern int _mi_dispose(MI_INFO *info,MI_KEYDEF *keyinfo,my_off_t pos, int level); extern my_off_t _mi_new(MI_INFO *info,MI_KEYDEF *keyinfo,int level); extern uint _mi_make_key(MI_INFO *info,uint keynr,uchar *key, - const byte *record,my_off_t filepos); -extern uint _mi_pack_key(MI_INFO *info,uint keynr,uchar *key,uchar *old, - uint key_length, HA_KEYSEG **last_used_keyseg); -extern int _mi_read_key_record(MI_INFO *info,my_off_t filepos,byte *buf); -extern int _mi_read_cache(IO_CACHE *info,byte *buff,my_off_t pos, + const uchar *record,my_off_t filepos); +extern uint _mi_pack_key(register MI_INFO *info, uint keynr, uchar *key, + uchar *old, key_part_map keypart_map, + HA_KEYSEG **last_used_keyseg); +extern int _mi_read_key_record(MI_INFO *info,my_off_t filepos,uchar *buf); +extern int _mi_read_cache(IO_CACHE *info,uchar *buff,my_off_t pos, uint length,int re_read_if_possibly); -extern ulonglong retrieve_auto_increment(MI_INFO *info,const byte *record); +extern ulonglong retrieve_auto_increment(MI_INFO *info,const uchar *record); -extern byte *mi_alloc_rec_buff(MI_INFO *,ulong, byte**); +extern uchar *mi_alloc_rec_buff(MI_INFO *,ulong, uchar**); #define mi_get_rec_buff_ptr(info,buf) \ ((((info)->s->options & HA_OPTION_PACK_RECORD) && (buf)) ? 
\ (buf) - MI_REC_BUFF_OFFSET : (buf)) #define mi_get_rec_buff_len(info,buf) \ (*((uint32 *)(mi_get_rec_buff_ptr(info,buf)))) -extern ulong _mi_rec_unpack(MI_INFO *info,byte *to,byte *from, +extern ulong _mi_rec_unpack(MI_INFO *info,uchar *to,uchar *from, ulong reclength); -extern my_bool _mi_rec_check(MI_INFO *info,const char *record, byte *packpos, +extern my_bool _mi_rec_check(MI_INFO *info,const uchar *record, uchar *packpos, ulong packed_length, my_bool with_checkum); extern int _mi_write_part_record(MI_INFO *info,my_off_t filepos,ulong length, - my_off_t next_filepos,byte **record, + my_off_t next_filepos,uchar **record, ulong *reclength,int *flag); extern void _mi_print_key(FILE *stream,HA_KEYSEG *keyseg,const uchar *key, uint length); extern my_bool _mi_read_pack_info(MI_INFO *info,pbool fix_keys); -extern int _mi_read_pack_record(MI_INFO *info,my_off_t filepos,byte *buf); -extern int _mi_read_rnd_pack_record(MI_INFO*, byte *,my_off_t, my_bool); +extern int _mi_read_pack_record(MI_INFO *info,my_off_t filepos,uchar *buf); +extern int _mi_read_rnd_pack_record(MI_INFO*, uchar *,my_off_t, my_bool); extern int _mi_pack_rec_unpack(MI_INFO *info, MI_BIT_BUFF *bit_buff, - byte *to, byte *from, ulong reclength); + uchar *to, uchar *from, ulong reclength); extern ulonglong mi_safe_mul(ulonglong a,ulonglong b); -extern int _mi_ft_update(MI_INFO *info, uint keynr, byte *keybuf, - const byte *oldrec, const byte *newrec, my_off_t pos); +extern int _mi_ft_update(MI_INFO *info, uint keynr, uchar *keybuf, + const uchar *oldrec, const uchar *newrec, my_off_t pos); struct st_sort_info; @@ -695,33 +698,33 @@ extern "C" { #endif extern uint _mi_get_block_info(MI_BLOCK_INFO *,File, my_off_t); -extern uint _mi_rec_pack(MI_INFO *info,byte *to,const byte *from); +extern uint _mi_rec_pack(MI_INFO *info,uchar *to,const uchar *from); extern uint _mi_pack_get_block_info(MI_INFO *myisam, MI_BIT_BUFF *bit_buff, - MI_BLOCK_INFO *info, byte **rec_buff_p, + MI_BLOCK_INFO *info, uchar **rec_buff_p, File file, my_off_t filepos); -extern void _my_store_blob_length(byte *pos,uint pack_length,uint length); +extern void _my_store_blob_length(uchar *pos,uint pack_length,uint length); extern void _myisam_log(enum myisam_log_commands command,MI_INFO *info, - const byte *buffert,uint length); + const uchar *buffert,uint length); extern void _myisam_log_command(enum myisam_log_commands command, - MI_INFO *info, const byte *buffert, + MI_INFO *info, const uchar *buffert, uint length, int result); extern void _myisam_log_record(enum myisam_log_commands command,MI_INFO *info, - const byte *record,my_off_t filepos, + const uchar *record,my_off_t filepos, int result); extern void mi_report_error(int errcode, const char *file_name); extern my_bool _mi_memmap_file(MI_INFO *info); extern void _mi_unmap_file(MI_INFO *info); -extern uint save_pack_length(uint version, byte *block_buff, ulong length); +extern uint save_pack_length(uint version, uchar *block_buff, ulong length); extern uint read_pack_length(uint version, const uchar *buf, ulong *length); extern uint calc_pack_length(uint version, ulong length); -extern uint mi_mmap_pread(MI_INFO *info, byte *Buffer, - uint Count, my_off_t offset, myf MyFlags); -extern uint mi_mmap_pwrite(MI_INFO *info, byte *Buffer, - uint Count, my_off_t offset, myf MyFlags); -extern uint mi_nommap_pread(MI_INFO *info, byte *Buffer, - uint Count, my_off_t offset, myf MyFlags); -extern uint mi_nommap_pwrite(MI_INFO *info, byte *Buffer, - uint Count, my_off_t offset, myf MyFlags); +extern size_t 
mi_mmap_pread(MI_INFO *info, uchar *Buffer, + size_t Count, my_off_t offset, myf MyFlags); +extern size_t mi_mmap_pwrite(MI_INFO *info, const uchar *Buffer, + size_t Count, my_off_t offset, myf MyFlags); +extern size_t mi_nommap_pread(MI_INFO *info, uchar *Buffer, + size_t Count, my_off_t offset, myf MyFlags); +extern size_t mi_nommap_pwrite(MI_INFO *info, const uchar *Buffer, + size_t Count, my_off_t offset, myf MyFlags); uint mi_state_info_write(File file, MI_STATE_INFO *state, uint pWrite); uchar *mi_state_info_read(uchar *ptr, MI_STATE_INFO *state); @@ -729,27 +732,27 @@ uint mi_state_info_read_dsk(File file, MI_STATE_INFO *state, my_bool pRead); uint mi_base_info_write(File file, MI_BASE_INFO *base); uchar *my_n_base_info_read(uchar *ptr, MI_BASE_INFO *base); int mi_keyseg_write(File file, const HA_KEYSEG *keyseg); -char *mi_keyseg_read(char *ptr, HA_KEYSEG *keyseg); +uchar *mi_keyseg_read(uchar *ptr, HA_KEYSEG *keyseg); uint mi_keydef_write(File file, MI_KEYDEF *keydef); -char *mi_keydef_read(char *ptr, MI_KEYDEF *keydef); +uchar *mi_keydef_read(uchar *ptr, MI_KEYDEF *keydef); uint mi_uniquedef_write(File file, MI_UNIQUEDEF *keydef); -char *mi_uniquedef_read(char *ptr, MI_UNIQUEDEF *keydef); +uchar *mi_uniquedef_read(uchar *ptr, MI_UNIQUEDEF *keydef); uint mi_recinfo_write(File file, MI_COLUMNDEF *recinfo); -char *mi_recinfo_read(char *ptr, MI_COLUMNDEF *recinfo); +uchar *mi_recinfo_read(uchar *ptr, MI_COLUMNDEF *recinfo); extern int mi_disable_indexes(MI_INFO *info); extern int mi_enable_indexes(MI_INFO *info); extern int mi_indexes_are_disabled(MI_INFO *info); -ulong _my_calc_total_blob_length(MI_INFO *info, const byte *record); -ha_checksum mi_checksum(MI_INFO *info, const byte *buf); -ha_checksum mi_static_checksum(MI_INFO *info, const byte *buf); -my_bool mi_check_unique(MI_INFO *info, MI_UNIQUEDEF *def, byte *record, +ulong _my_calc_total_blob_length(MI_INFO *info, const uchar *record); +ha_checksum mi_checksum(MI_INFO *info, const uchar *buf); +ha_checksum mi_static_checksum(MI_INFO *info, const uchar *buf); +my_bool mi_check_unique(MI_INFO *info, MI_UNIQUEDEF *def, uchar *record, ha_checksum unique_hash, my_off_t pos); -ha_checksum mi_unique_hash(MI_UNIQUEDEF *def, const byte *buf); +ha_checksum mi_unique_hash(MI_UNIQUEDEF *def, const uchar *buf); int _mi_cmp_static_unique(MI_INFO *info, MI_UNIQUEDEF *def, - const byte *record, my_off_t pos); + const uchar *record, my_off_t pos); int _mi_cmp_dynamic_unique(MI_INFO *info, MI_UNIQUEDEF *def, - const byte *record, my_off_t pos); -int mi_unique_comp(MI_UNIQUEDEF *def, const byte *a, const byte *b, + const uchar *record, my_off_t pos); +int mi_unique_comp(MI_UNIQUEDEF *def, const uchar *a, const uchar *b, my_bool null_are_equal); void mi_get_status(void* param, int concurrent_insert); void mi_update_status(void* param); diff --git a/storage/myisam/myisamlog.c b/storage/myisam/myisamlog.c index 0bcf74d87a4..6566a7a7a02 100644 --- a/storage/myisam/myisamlog.c +++ b/storage/myisam/myisamlog.c @@ -32,14 +32,15 @@ struct file_info { long process; int filenr,id; uint rnd; - my_string name,show_name,record; + char *name, *show_name; + uchar *record; MI_INFO *isam; bool closed,used; ulong accessed; }; struct test_if_open_param { - my_string name; + char * name; int max_id; }; @@ -53,24 +54,25 @@ struct st_access_param extern int main(int argc,char * *argv); static void get_options(int *argc,char ***argv); -static int examine_log(my_string file_name,char **table_names); -static int read_string(IO_CACHE *file,gptr *to,uint length); +static 
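
Editor's note: mi_mmap_pread()/mi_nommap_pread() above are the two interchangeable implementations behind the share's file_read pointer; this change widens their count and return types from uint to size_t. Their shape, sketched with POSIX pread() and a hypothetical struct rather than the real MI_INFO:

    #include <string.h>
    #include <unistd.h>            /* pread(), POSIX */

    struct file_io {               /* hypothetical stand-in for the share */
      int            fd;
      unsigned char *map;          /* non-NULL while the file is mmap()ed */
      size_t         map_len;
      size_t (*file_read)(struct file_io *, unsigned char *,
                          size_t, off_t);
    };

    static size_t read_mmap(struct file_io *io, unsigned char *buf,
                            size_t count, off_t offset)
    {
      if ((size_t) offset >= io->map_len)
        return 0;                                  /* past end of map */
      if ((size_t) offset + count > io->map_len)
        count = io->map_len - (size_t) offset;     /* clamp at EOF */
      memcpy(buf, io->map + offset, count);
      return count;
    }

    static size_t read_nommap(struct file_io *io, unsigned char *buf,
                              size_t count, off_t offset)
    {
      ssize_t n = pread(io->fd, buf, count, offset);
      return n < 0 ? 0 : (size_t) n;
    }

    /* io->file_read = io->map ? read_mmap : read_nommap; */
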
int examine_log(char * file_name,char **table_names); +static int read_string(IO_CACHE *file,uchar* *to,uint length); static int file_info_compare(void *cmp_arg, void *a,void *b); static int test_if_open(struct file_info *key,element_count count, struct test_if_open_param *param); -static void fix_blob_pointers(MI_INFO *isam,byte *record); +static void fix_blob_pointers(MI_INFO *isam,uchar *record); static int test_when_accessed(struct file_info *key,element_count count, struct st_access_param *access_param); static void file_info_free(struct file_info *info); static int close_some_file(TREE *tree); static int reopen_closed_file(TREE *tree,struct file_info *file_info); -static int find_record_with_key(struct file_info *file_info,byte *record); +static int find_record_with_key(struct file_info *file_info,uchar *record); static void printf_log(const char *str,...); -static bool cmp_filename(struct file_info *file_info,my_string name); +static bool cmp_filename(struct file_info *file_info,char * name); static uint verbose=0,update=0,test_info=0,max_files=0,re_open_count=0, recover=0,prefix_remove=0,opt_processes=0; -static my_string log_filename=0,filepath=0,write_filename=0,record_pos_file=0; +static char *log_filename=0, *filepath=0, *write_filename=0; +static char *record_pos_file= 0; static ulong com_count[10][3],number_of_commands=(ulong) ~0L, isamlog_process; static my_off_t isamlog_filepos,start_offset=0,record_pos= HA_OFFSET_ERROR; @@ -296,7 +298,7 @@ static void get_options(register int *argc, register char ***argv) } -static int examine_log(my_string file_name, char **table_names) +static int examine_log(char * file_name, char **table_names) { uint command,result,files_open; ulong access_time,length; @@ -304,7 +306,7 @@ static int examine_log(my_string file_name, char **table_names) int lock_command,mi_result; char isam_file_name[FN_REFLEN],llbuff[21],llbuff2[21]; uchar head[20]; - gptr buff; + uchar* buff; struct test_if_open_param open_param; IO_CACHE cache; File file; @@ -327,7 +329,7 @@ static int examine_log(my_string file_name, char **table_names) } init_io_cache(&cache,file,0,READ_CACHE,start_offset,0,MYF(0)); - bzero((gptr) com_count,sizeof(com_count)); + bzero((uchar*) com_count,sizeof(com_count)); init_tree(&tree,0,0,sizeof(file_info),(qsort_cmp2) file_info_compare,1, (tree_element_free) file_info_free, NULL); VOID(init_key_cache(dflt_key_cache,KEY_CACHE_BLOCK_SIZE,KEY_CACHE_SIZE, @@ -335,7 +337,7 @@ static int examine_log(my_string file_name, char **table_names) files_open=0; access_time=0; while (access_time++ != number_of_commands && - !my_b_read(&cache,(byte*) head,9)) + !my_b_read(&cache,(uchar*) head,9)) { isamlog_filepos=my_b_tell(&cache)-9L; file_info.filenr= mi_uint2korr(head+1); @@ -375,14 +377,15 @@ static int examine_log(my_string file_name, char **table_names) } if (curr_file_info) - printf("\nWarning: %s is opened with same process and filenumber\nMaybe you should use the -P option ?\n", + printf("\nWarning: %s is opened with same process and filenumber\n" + "Maybe you should use the -P option ?\n", curr_file_info->show_name); - if (my_b_read(&cache,(byte*) head,2)) + if (my_b_read(&cache,(uchar*) head,2)) goto err; file_info.name=0; file_info.show_name=0; file_info.record=0; - if (read_string(&cache,(gptr*) &file_info.name, + if (read_string(&cache,(uchar**) &file_info.name, (uint) mi_uint2korr(head))) goto err; { @@ -455,7 +458,7 @@ static int examine_log(my_string file_name, char **table_names) files_open++; file_info.closed=0; } - VOID(tree_insert(&tree, 
(gptr) &file_info, 0, tree.custom_arg)); + VOID(tree_insert(&tree, (uchar*) &file_info, 0, tree.custom_arg)); if (file_info.used) { if (verbose && !record_pos_file) @@ -474,11 +477,11 @@ static int examine_log(my_string file_name, char **table_names) { if (!curr_file_info->closed) files_open--; - VOID(tree_delete(&tree, (gptr) curr_file_info, 0, tree.custom_arg)); + VOID(tree_delete(&tree, (uchar*) curr_file_info, 0, tree.custom_arg)); } break; case MI_LOG_EXTRA: - if (my_b_read(&cache,(byte*) head,1)) + if (my_b_read(&cache,(uchar*) head,1)) goto err; extra_command=(enum ha_extra_function) head[0]; if (verbose && !record_pos_file && @@ -499,7 +502,7 @@ static int examine_log(my_string file_name, char **table_names) } break; case MI_LOG_DELETE: - if (my_b_read(&cache,(byte*) head,8)) + if (my_b_read(&cache,(uchar*) head,8)) goto err; filepos=mi_sizekorr(head); if (verbose && (!record_pos_file || @@ -534,7 +537,7 @@ static int examine_log(my_string file_name, char **table_names) break; case MI_LOG_WRITE: case MI_LOG_UPDATE: - if (my_b_read(&cache,(byte*) head,12)) + if (my_b_read(&cache,(uchar*) head,12)) goto err; filepos=mi_sizekorr(head); length=mi_uint4korr(head+8); @@ -616,7 +619,7 @@ static int examine_log(my_string file_name, char **table_names) my_free(buff,MYF(0)); break; case MI_LOG_LOCK: - if (my_b_read(&cache,(byte*) head,sizeof(lock_command))) + if (my_b_read(&cache,(uchar*) head,sizeof(lock_command))) goto err; memcpy_fixed(&lock_command,head,sizeof(lock_command)); if (verbose && !record_pos_file && @@ -675,14 +678,14 @@ static int examine_log(my_string file_name, char **table_names) } -static int read_string(IO_CACHE *file, register gptr *to, register uint length) +static int read_string(IO_CACHE *file, register uchar* *to, register uint length) { DBUG_ENTER("read_string"); if (*to) - my_free((gptr) *to,MYF(0)); - if (!(*to= (gptr) my_malloc(length+1,MYF(MY_WME))) || - my_b_read(file,(byte*) *to,length)) + my_free((uchar*) *to,MYF(0)); + if (!(*to= (uchar*) my_malloc(length+1,MYF(MY_WME))) || + my_b_read(file,(uchar*) *to,length)) { if (*to) my_free(*to,MYF(0)); @@ -717,9 +720,9 @@ static int test_if_open (struct file_info *key, } -static void fix_blob_pointers(MI_INFO *info, byte *record) +static void fix_blob_pointers(MI_INFO *info, uchar *record) { - byte *pos; + uchar *pos; MI_BLOB *blob,*end; pos=record+info->s->base.reclength; @@ -801,7 +804,7 @@ static int reopen_closed_file(TREE *tree, struct file_info *fileinfo) /* Try to find record with uniq key */ -static int find_record_with_key(struct file_info *file_info, byte *record) +static int find_record_with_key(struct file_info *file_info, uchar *record) { uint key; MI_INFO *info=file_info->isam; @@ -813,7 +816,7 @@ static int find_record_with_key(struct file_info *file_info, byte *record) info->s->keyinfo[key].flag & HA_NOSAME) { VOID(_mi_make_key(info,key,tmp_key,record,0L)); - return mi_rkey(info,file_info->record,(int) key,(char*) tmp_key,0, + return mi_rkey(info,file_info->record,(int) key,tmp_key,0, HA_READ_KEY_EXACT); } } @@ -836,7 +839,7 @@ static void printf_log(const char *format,...) 
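
Editor's note: read_string() above follows the log file's convention for strings: a length known from the preceding header bytes, then exactly that many bytes, placed in a fresh buffer allocated one byte longer (length+1). The same pattern in portable C, with stdio standing in for IO_CACHE; the explicit NUL terminator is this sketch's choice:

    #include <stdio.h>
    #include <stdlib.h>

    /* Returns NULL on allocation or short-read failure. */
    static unsigned char *read_string(FILE *f, size_t length)
    {
      unsigned char *to = malloc(length + 1);
      if (to == NULL)
        return NULL;
      if (fread(to, 1, length, f) != length)
      {
        free(to);
        return NULL;
      }
      to[length] = '\0';
      return to;
    }
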
} -static bool cmp_filename(struct file_info *file_info, my_string name) +static bool cmp_filename(struct file_info *file_info, char * name) { if (!file_info) return 1; diff --git a/storage/myisam/myisampack.c b/storage/myisam/myisampack.c index 6daa062472e..37428ddd279 100644 --- a/storage/myisam/myisampack.c +++ b/storage/myisam/myisampack.c @@ -69,8 +69,8 @@ typedef struct st_huff_counts { my_off_t pre_space[8]; my_off_t tot_end_space,tot_pre_space,zero_fields,empty_fields,bytes_packed; TREE int_tree; /* Tree for detecting distinct column values. */ - byte *tree_buff; /* Column values, 'field_length' each. */ - byte *tree_pos; /* Points to end of column values in 'tree_buff'. */ + uchar *tree_buff; /* Column values, 'field_length' each. */ + uchar *tree_pos; /* Points to end of column values in 'tree_buff'. */ } HUFF_COUNTS; typedef struct st_huff_element HUFF_ELEMENT; @@ -141,8 +141,8 @@ static int test_space_compress(HUFF_COUNTS *huff_counts,my_off_t records, enum en_fieldtype field_type); static HUFF_TREE* make_huff_trees(HUFF_COUNTS *huff_counts,uint trees); static int make_huff_tree(HUFF_TREE *tree,HUFF_COUNTS *huff_counts); -static int compare_huff_elements(void *not_used, byte *a,byte *b); -static int save_counts_in_queue(byte *key,element_count count, +static int compare_huff_elements(void *not_used, uchar *a,uchar *b); +static int save_counts_in_queue(uchar *key,element_count count, HUFF_TREE *tree); static my_off_t calc_packed_length(HUFF_COUNTS *huff_counts,uint flag); static uint join_same_trees(HUFF_COUNTS *huff_counts,uint trees); @@ -171,7 +171,7 @@ static int save_state(MI_INFO *isam_file,PACK_MRG_INFO *mrg,my_off_t new_length, static int save_state_mrg(File file,PACK_MRG_INFO *isam_file,my_off_t new_length, ha_checksum crc); static int mrg_close(PACK_MRG_INFO *mrg); -static int mrg_rrnd(PACK_MRG_INFO *info,byte *buf); +static int mrg_rrnd(PACK_MRG_INFO *info,uchar *buf); static void mrg_reset(PACK_MRG_INFO *mrg); #if !defined(DBUG_OFF) static void fakebigcodes(HUFF_COUNTS *huff_counts, HUFF_COUNTS *end_count); @@ -257,10 +257,10 @@ static struct my_option my_long_options[] = 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, #endif {"backup", 'b', "Make a backup of the table as table_name.OLD.", - (gptr*) &backup, (gptr*) &backup, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, + (uchar**) &backup, (uchar**) &backup, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"character-sets-dir", OPT_CHARSETS_DIR_MP, - "Directory where character sets are.", (gptr*) &charsets_dir, - (gptr*) &charsets_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + "Directory where character sets are.", (uchar**) &charsets_dir, + (uchar**) &charsets_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"debug", '#', "Output debug log. Often this is 'd:t:o,filename'.", 0, 0, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, {"force", 'f', @@ -268,7 +268,7 @@ static struct my_option my_long_options[] = 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"join", 'j', "Join all given tables into 'new_table_name'. 
All tables MUST have identical layouts.", - (gptr*) &join_table, (gptr*) &join_table, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, + (uchar**) &join_table, (uchar**) &join_table, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"help", '?', "Display this help and exit.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, @@ -282,8 +282,8 @@ static struct my_option my_long_options[] = 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"version", 'V', "Output version information and exit.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"wait", 'w', "Wait and retry if table is in use.", (gptr*) &opt_wait, - (gptr*) &opt_wait, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, + {"wait", 'w', "Wait and retry if table is in use.", (uchar**) &opt_wait, + (uchar**) &opt_wait, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} }; @@ -478,7 +478,7 @@ static bool open_isam_files(PACK_MRG_INFO *mrg,char **names,uint count) error: while (i--) mi_close(mrg->file[i]); - my_free((gptr) mrg->file,MYF(0)); + my_free((uchar*) mrg->file,MYF(0)); return 1; } @@ -513,14 +513,14 @@ static int compress(PACK_MRG_INFO *mrg,char *result_table) { /* Make a new indexfile based on first file in list */ uint length; - char *buff; + uchar *buff; strmov(org_name,result_table); /* Fix error messages */ VOID(fn_format(new_name,result_table,"",MI_NAME_IEXT,2)); if ((join_isam_file=my_create(new_name,0,tmpfile_createflag,MYF(MY_WME))) < 0) goto err; length=(uint) share->base.keystart; - if (!(buff=my_malloc(length,MYF(MY_WME)))) + if (!(buff= (uchar*) my_malloc(length,MYF(MY_WME)))) goto err; if (my_pread(share->kfile,buff,length,0L,MYF(MY_WME | MY_NABP)) || my_write(join_isam_file,buff,length, @@ -644,7 +644,7 @@ static int compress(PACK_MRG_INFO *mrg,char *result_table) new_length=file_buffer.pos_in_file; if (!error && !test_only) { - char buff[MEMMAP_EXTRA_MARGIN]; /* End marginal for memmap */ + uchar buff[MEMMAP_EXTRA_MARGIN]; /* End marginal for memmap */ bzero(buff,sizeof(buff)); error=my_write(file_buffer.file,buff,sizeof(buff), MYF(MY_WME | MY_NABP | MY_WAIT_IF_FULL)) != 0; @@ -811,11 +811,11 @@ static void free_counts_and_tree_and_queue(HUFF_TREE *huff_trees, uint trees, for (i=0 ; i < trees ; i++) { if (huff_trees[i].element_buffer) - my_free((gptr) huff_trees[i].element_buffer,MYF(0)); + my_free((uchar*) huff_trees[i].element_buffer,MYF(0)); if (huff_trees[i].code) - my_free((gptr) huff_trees[i].code,MYF(0)); + my_free((uchar*) huff_trees[i].code,MYF(0)); } - my_free((gptr) huff_trees,MYF(0)); + my_free((uchar*) huff_trees,MYF(0)); } if (huff_counts) { @@ -823,11 +823,11 @@ static void free_counts_and_tree_and_queue(HUFF_TREE *huff_trees, uint trees, { if (huff_counts[i].tree_buff) { - my_free((gptr) huff_counts[i].tree_buff,MYF(0)); + my_free((uchar*) huff_counts[i].tree_buff,MYF(0)); delete_tree(&huff_counts[i].int_tree); } } - my_free((gptr) huff_counts,MYF(0)); + my_free((uchar*) huff_counts,MYF(0)); } delete_queue(&queue); /* This is safe to free */ return; @@ -840,7 +840,7 @@ static int get_statistic(PACK_MRG_INFO *mrg,HUFF_COUNTS *huff_counts) int error; uint length; ulong reclength,max_blob_length; - byte *record,*pos,*next_pos,*end_pos,*start_pos; + uchar *record,*pos,*next_pos,*end_pos,*start_pos; ha_rows record_count; my_bool static_row_size; HUFF_COUNTS *count,*end_count; @@ -848,7 +848,7 @@ static int get_statistic(PACK_MRG_INFO *mrg,HUFF_COUNTS *huff_counts) DBUG_ENTER("get_statistic"); reclength=mrg->file[0]->s->base.reclength; - record=(byte*) my_alloca(reclength); + 
record=(uchar*) my_alloca(reclength); end_count=huff_counts+mrg->file[0]->s->base.fields; record_count=0; glob_crc=0; max_blob_length=0; @@ -1032,7 +1032,7 @@ static int get_statistic(PACK_MRG_INFO *mrg,HUFF_COUNTS *huff_counts) { uint i; /* Zero fields are just counted. Go to the next record. */ - if (!memcmp((byte*) start_pos,zero_string,count->field_length)) + if (!memcmp((uchar*) start_pos,zero_string,count->field_length)) { count->zero_fields++; continue; @@ -1141,12 +1141,12 @@ static int get_statistic(PACK_MRG_INFO *mrg,HUFF_COUNTS *huff_counts) mrg->records=record_count; mrg->max_blob_length=max_blob_length; - my_afree((gptr) record); + my_afree((uchar*) record); DBUG_RETURN(error != HA_ERR_END_OF_FILE); } static int compare_huff_elements(void *not_used __attribute__((unused)), - byte *a, byte *b) + uchar *a, uchar *b) { return *((my_off_t*) a) < *((my_off_t*) b) ? -1 : (*((my_off_t*) a) == *((my_off_t*) b) ? 0 : 1); @@ -1162,7 +1162,7 @@ static void check_counts(HUFF_COUNTS *huff_counts, uint trees, my_off_t old_length,new_length,length; DBUG_ENTER("check_counts"); - bzero((gptr) field_count,sizeof(field_count)); + bzero((uchar*) field_count,sizeof(field_count)); space_fields=fill_zero_fields=0; for (; trees-- ; huff_counts++) @@ -1328,12 +1328,12 @@ static void check_counts(HUFF_COUNTS *huff_counts, uint trees, } else { - my_free((gptr) huff_counts->tree_buff,MYF(0)); + my_free((uchar*) huff_counts->tree_buff,MYF(0)); delete_tree(&huff_counts->int_tree); huff_counts->tree_buff=0; } if (tree.element_buffer) - my_free((gptr) tree.element_buffer,MYF(0)); + my_free((uchar*) tree.element_buffer,MYF(0)); } if (huff_counts->pack_type & PACK_TYPE_SPACE_FIELDS) space_fields++; @@ -1450,8 +1450,8 @@ static HUFF_TREE* make_huff_trees(HUFF_COUNTS *huff_counts, uint trees) if (make_huff_tree(huff_tree+tree,huff_counts+tree)) { while (tree--) - my_free((gptr) huff_tree[tree].element_buffer,MYF(0)); - my_free((gptr) huff_tree,MYF(0)); + my_free((uchar*) huff_tree[tree].element_buffer,MYF(0)); + my_free((uchar*) huff_tree,MYF(0)); DBUG_RETURN(0); } } @@ -1526,7 +1526,7 @@ static int make_huff_tree(HUFF_TREE *huff_tree, HUFF_COUNTS *huff_counts) { HUFF_ELEMENT *temp; if (!(temp= - (HUFF_ELEMENT*) my_realloc((gptr) huff_tree->element_buffer, + (HUFF_ELEMENT*) my_realloc((uchar*) huff_tree->element_buffer, found*2*sizeof(HUFF_ELEMENT), MYF(MY_WME)))) return 1; @@ -1561,7 +1561,7 @@ static int make_huff_tree(HUFF_TREE *huff_tree, HUFF_COUNTS *huff_counts) */ tree_walk(&huff_counts->int_tree, (int (*)(void*, element_count,void*)) save_counts_in_queue, - (gptr) huff_tree, left_root_right); + (uchar*) huff_tree, left_root_right); } else { @@ -1587,7 +1587,7 @@ static int make_huff_tree(HUFF_TREE *huff_tree, HUFF_COUNTS *huff_counts) new_huff_el->count=huff_counts->counts[i]; new_huff_el->a.leaf.null=0; new_huff_el->a.leaf.element_nr=i; - queue.root[found]=(byte*) new_huff_el; + queue.root[found]=(uchar*) new_huff_el; } } /* @@ -1604,7 +1604,7 @@ static int make_huff_tree(HUFF_TREE *huff_tree, HUFF_COUNTS *huff_counts) new_huff_el->a.leaf.element_nr=huff_tree->min_chr=last-1; else new_huff_el->a.leaf.element_nr=huff_tree->max_chr=last+1; - queue.root[found]=(byte*) new_huff_el; + queue.root[found]=(uchar*) new_huff_el; } } @@ -1654,7 +1654,7 @@ static int make_huff_tree(HUFF_TREE *huff_tree, HUFF_COUNTS *huff_counts) Replace the copied top element by the new element and re-order the queue. 
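
Editor's note: make_huff_tree() above is the classic Huffman construction driven by a priority queue: pop the two elements with the smallest counts, join them under a new internal node whose count is their sum, put the joined node back, and repeat until one root remains; queue_replaced() re-orders the heap after the top is overwritten. The core loop, with hypothetical heap helpers standing in for the queue_*() calls:

    #include <stddef.h>

    struct huff_el {
      unsigned long long count;
      struct huff_el *left, *right;   /* NULL for leaf elements */
    };

    /* Hypothetical min-heap helpers (the real code uses queue_remove(),
       queue.root[1] and queue_replaced()); alloc_el() stands in for the
       element-buffer allocation. */
    struct heap;
    struct huff_el *heap_pop(struct heap *h);        /* remove smallest */
    struct huff_el *heap_top(struct heap *h);        /* peek smallest   */
    void            heap_replace_top(struct heap *h, struct huff_el *e);
    struct huff_el *alloc_el(void);

    static struct huff_el *build_huffman(struct heap *h, size_t n_elements)
    {
      size_t n;
      for (n = n_elements; n > 1; n--)
      {
        struct huff_el *a    = heap_pop(h);   /* smallest count */
        struct huff_el *b    = heap_top(h);   /* second smallest */
        struct huff_el *join = alloc_el();
        join->count = a->count + b->count;
        join->left  = a;
        join->right = b;
        heap_replace_top(h, join);            /* overwrite top, re-order */
      }
      return heap_top(h);                     /* the root */
    }
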
*/ - queue.root[1]=(byte*) new_huff_el; + queue.root[1]=(uchar*) new_huff_el; queue_replaced(&queue); } huff_tree->root=(HUFF_ELEMENT*) queue.root[1]; @@ -1693,7 +1693,7 @@ static int compare_tree(void* cmp_arg __attribute__((unused)), 0 */ -static int save_counts_in_queue(byte *key, element_count count, +static int save_counts_in_queue(uchar *key, element_count count, HUFF_TREE *tree) { HUFF_ELEMENT *new_huff_el; @@ -1703,7 +1703,7 @@ static int save_counts_in_queue(byte *key, element_count count, new_huff_el->a.leaf.null=0; new_huff_el->a.leaf.element_nr= (uint) (key- tree->counts->tree_buff) / tree->counts->field_length; - queue.root[tree->elements]=(byte*) new_huff_el; + queue.root[tree->elements]=(uchar*) new_huff_el; return 0; } @@ -1760,7 +1760,7 @@ static my_off_t calc_packed_length(HUFF_COUNTS *huff_counts, first=i; last=i; /* We start with root[1], which is the queues top element. */ - queue.root[found]=(byte*) &huff_counts->counts[i]; + queue.root[found]=(uchar*) &huff_counts->counts[i]; } } if (!found) @@ -1771,7 +1771,7 @@ static my_off_t calc_packed_length(HUFF_COUNTS *huff_counts, the loop, which follows the Huffman algorithm. */ if (found < 2) - queue.root[++found]=(byte*) &huff_counts->counts[last ? 0 : 1]; + queue.root[++found]=(uchar*) &huff_counts->counts[last ? 0 : 1]; /* Make a queue from the queue buffer. */ queue.elements=found; @@ -1826,7 +1826,7 @@ static my_off_t calc_packed_length(HUFF_COUNTS *huff_counts, queue. This successively replaces the references to counts by references to HUFF_ELEMENTs. */ - queue.root[1]=(byte*) new_huff_el; + queue.root[1]=(uchar*) new_huff_el; queue_replaced(&queue); } DBUG_RETURN(bytes_packed+(bits_packed+7)/8); @@ -1859,12 +1859,12 @@ static uint join_same_trees(HUFF_COUNTS *huff_counts, uint trees) i->tree->tree_pack_length+j->tree->tree_pack_length+ ALLOWED_JOIN_DIFF) { - memcpy_fixed((byte*) i->counts,(byte*) count.counts, + memcpy_fixed((uchar*) i->counts,(uchar*) count.counts, sizeof(count.counts[0])*256); - my_free((gptr) j->tree->element_buffer,MYF(0)); + my_free((uchar*) j->tree->element_buffer,MYF(0)); j->tree->element_buffer=0; j->tree=i->tree; - bmove((byte*) i->counts,(byte*) count.counts, + bmove((uchar*) i->counts,(uchar*) count.counts, sizeof(count.counts[0])*256); if (make_huff_tree(i->tree,i)) return (uint) -1; @@ -2007,7 +2007,7 @@ static char *hexdigits(ulonglong value) static int write_header(PACK_MRG_INFO *mrg,uint head_length,uint trees, my_off_t tot_elements,my_off_t filelength) { - byte *buff= (byte*) file_buffer.pos; + uchar *buff= (uchar*) file_buffer.pos; bzero(buff,HEAD_LENGTH); memcpy_fixed(buff,myisam_pack_file_magic,4); @@ -2023,7 +2023,7 @@ static int write_header(PACK_MRG_INFO *mrg,uint head_length,uint trees, if (test_only) return 0; VOID(my_seek(file_buffer.file,0L,MY_SEEK_SET,MYF(0))); - return my_write(file_buffer.file,(const byte *) file_buffer.pos,HEAD_LENGTH, + return my_write(file_buffer.file,(const uchar *) file_buffer.pos,HEAD_LENGTH, MYF(MY_WME | MY_NABP | MY_WAIT_IF_FULL)) != 0; } @@ -2159,7 +2159,7 @@ static my_off_t write_huff_tree(HUFF_TREE *huff_tree, uint trees) { /* This should be impossible */ VOID(fprintf(stderr, "Tree offset got too big: %d, aborted\n", huff_tree->max_offset)); - my_afree((gptr) packed_tree); + my_afree((uchar*) packed_tree); return 0; } @@ -2331,7 +2331,7 @@ static my_off_t write_huff_tree(HUFF_TREE *huff_tree, uint trees) DBUG_PRINT("info", (" ")); if (verbose >= 2) VOID(printf("\n")); - my_afree((gptr) packed_tree); + my_afree((uchar*) packed_tree); if 
(errors) { VOID(fprintf(stderr, "Error: Generated decode trees are corrupt. Stop.\n")); @@ -2412,7 +2412,7 @@ static int compress_isam_file(PACK_MRG_INFO *mrg, HUFF_COUNTS *huff_counts) my_off_t record_count; char llbuf[32]; ulong length,pack_length; - byte *record,*pos,*end_pos,*record_pos,*start_pos; + uchar *record,*pos,*end_pos,*record_pos,*start_pos; HUFF_COUNTS *count,*end_count; HUFF_TREE *tree; MI_INFO *isam_file=mrg->file[0]; @@ -2420,7 +2420,7 @@ static int compress_isam_file(PACK_MRG_INFO *mrg, HUFF_COUNTS *huff_counts) DBUG_ENTER("compress_isam_file"); /* Allocate a buffer for the records (excluding blobs). */ - if (!(record=(byte*) my_alloca(isam_file->s->base.reclength))) + if (!(record=(uchar*) my_alloca(isam_file->s->base.reclength))) return -1; end_count=huff_counts+isam_file->s->base.fields; @@ -2471,7 +2471,7 @@ static int compress_isam_file(PACK_MRG_INFO *mrg, HUFF_COUNTS *huff_counts) { if (flush_buffer((ulong) max_calc_length + (ulong) max_pack_length)) break; - record_pos= (byte*) file_buffer.pos; + record_pos= (uchar*) file_buffer.pos; file_buffer.pos+=max_pack_length; for (start_pos=record, count= huff_counts; count < end_count ; count++) { @@ -2508,7 +2508,7 @@ static int compress_isam_file(PACK_MRG_INFO *mrg, HUFF_COUNTS *huff_counts) switch (count->field_type) { case FIELD_SKIP_ZERO: - if (!memcmp((byte*) start_pos,zero_string,field_length)) + if (!memcmp((uchar*) start_pos,zero_string,field_length)) { DBUG_PRINT("fields", ("FIELD_SKIP_ZERO zeroes only, bits: 1")); write_bits(1,1); @@ -2637,7 +2637,7 @@ static int compress_isam_file(PACK_MRG_INFO *mrg, HUFF_COUNTS *huff_counts) break; case FIELD_INTERVALL: global_count=count; - pos=(byte*) tree_search(&count->int_tree, start_pos, + pos=(uchar*) tree_search(&count->int_tree, start_pos, count->int_tree.custom_arg); intervall=(uint) (pos - count->tree_buff)/field_length; DBUG_PRINT("fields", ("FIELD_INTERVALL")); @@ -2660,7 +2660,7 @@ static int compress_isam_file(PACK_MRG_INFO *mrg, HUFF_COUNTS *huff_counts) } else { - byte *blob,*blob_end; + uchar *blob,*blob_end; DBUG_PRINT("fields", ("FIELD_BLOB not empty, bits: 1")); write_bits(0,1); /* Write the blob length. */ @@ -2689,8 +2689,9 @@ static int compress_isam_file(PACK_MRG_INFO *mrg, HUFF_COUNTS *huff_counts) } case FIELD_VARCHAR: { - uint pack_length= HA_VARCHAR_PACKLENGTH(count->field_length-1); - ulong col_length= (pack_length == 1 ? (uint) *(uchar*) start_pos : + uint var_pack_length= HA_VARCHAR_PACKLENGTH(count->field_length-1); + ulong col_length= (var_pack_length == 1 ? + (uint) *(uchar*) start_pos : uint2korr(start_pos)); /* Empty varchar are encoded with a single 1 bit. */ if (!col_length) @@ -2700,7 +2701,7 @@ static int compress_isam_file(PACK_MRG_INFO *mrg, HUFF_COUNTS *huff_counts) } else { - byte *end=start_pos+pack_length+col_length; + uchar *end= start_pos + var_pack_length + col_length; DBUG_PRINT("fields", ("FIELD_VARCHAR not empty, bits: 1")); write_bits(0,1); /* Write the varchar length. */ @@ -2708,7 +2709,7 @@ static int compress_isam_file(PACK_MRG_INFO *mrg, HUFF_COUNTS *huff_counts) col_length, count->length_bits)); write_bits(col_length,count->length_bits); /* Encode the varchar bytes. 
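
Editor's note: besides the byte -> uchar casts, the FIELD_VARCHAR hunk above fixes a real bug: the inner pack_length shadowed the outer pack_length declared at the top of compress_isam_file(), so it is renamed var_pack_length. The prefix it decodes is the usual MyISAM varchar header: one byte when the maximum column length fits in 255, otherwise two little-endian bytes (uint2korr). A sketch of the decode:

    #include <stddef.h>

    /* Little-endian 2-byte read; equivalent to MySQL's uint2korr(). */
    static unsigned int read_u16le(const unsigned char *p)
    {
      return (unsigned int) p[0] | ((unsigned int) p[1] << 8);
    }

    /* Length of the varchar data at 'pos'; prefix_bytes is 1 or 2,
       i.e. HA_VARCHAR_PACKLENGTH() of the maximum column length. */
    static size_t varchar_data_length(const unsigned char *pos,
                                      unsigned int prefix_bytes)
    {
      return prefix_bytes == 1 ? (size_t) pos[0]
                               : (size_t) read_u16le(pos);
    }
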
*/ - for (start_pos+=pack_length ; start_pos < end ; start_pos++) + for (start_pos+= var_pack_length ; start_pos < end ; start_pos++) { DBUG_PRINT("fields", ("value: 0x%02x code: 0x%s bits: %2u bin: %s", @@ -2732,7 +2733,7 @@ static int compress_isam_file(PACK_MRG_INFO *mrg, HUFF_COUNTS *huff_counts) DBUG_PRINT("fields", ("---")); } flush_bits(); - length=(ulong) ((byte*) file_buffer.pos - record_pos) - max_pack_length; + length=(ulong) ((uchar*) file_buffer.pos - record_pos) - max_pack_length; pack_length= save_pack_length(pack_version, record_pos, length); if (pack_blob_length) pack_length+= save_pack_length(pack_version, record_pos + pack_length, @@ -2772,7 +2773,7 @@ static int compress_isam_file(PACK_MRG_INFO *mrg, HUFF_COUNTS *huff_counts) if (verbose >= 2) VOID(printf("wrote %s records.\n", llstr((longlong) record_count, llbuf))); - my_afree((gptr) record); + my_afree((uchar*) record); mrg->ref_length=max_pack_length; mrg->min_pack_length=max_record_length ? min_record_length : 0; mrg->max_pack_length=max_record_length; @@ -2839,7 +2840,7 @@ static int flush_buffer(ulong neaded_length) if (test_only) return 0; if (error_on_write|| my_write(file_buffer.file, - (const byte*) file_buffer.buffer, + (const uchar*) file_buffer.buffer, length, MYF(MY_WME | MY_NABP | MY_WAIT_IF_FULL))) { @@ -2866,7 +2867,7 @@ static int flush_buffer(ulong neaded_length) static void end_file_buffer(void) { - my_free((gptr) file_buffer.buffer,MYF(0)); + my_free((uchar*) file_buffer.buffer,MYF(0)); } /* output `bits` low bits of `value' */ @@ -3024,7 +3025,7 @@ static void mrg_reset(PACK_MRG_INFO *mrg) } } -static int mrg_rrnd(PACK_MRG_INFO *info,byte *buf) +static int mrg_rrnd(PACK_MRG_INFO *info,uchar *buf) { int error; MI_INFO *isam_info; @@ -3047,7 +3048,7 @@ static int mrg_rrnd(PACK_MRG_INFO *info,byte *buf) for (;;) { isam_info->update&= HA_STATE_CHANGED; - if (!(error=(*isam_info->s->read_rnd)(isam_info,(byte*) buf, + if (!(error=(*isam_info->s->read_rnd)(isam_info,(uchar*) buf, filepos, 1)) || error != HA_ERR_END_OF_FILE) return (error); @@ -3070,7 +3071,7 @@ static int mrg_close(PACK_MRG_INFO *mrg) for (i=0 ; i < mrg->count ; i++) error|=mi_close(mrg->file[i]); if (mrg->free_file) - my_free((gptr) mrg->file,MYF(0)); + my_free((uchar*) mrg->file,MYF(0)); return error; } @@ -3133,7 +3134,7 @@ static void fakebigcodes(HUFF_COUNTS *huff_counts, HUFF_COUNTS *end_count) */ if (huff_counts->tree_buff) { - my_free((gptr) huff_counts->tree_buff, MYF(0)); + my_free((uchar*) huff_counts->tree_buff, MYF(0)); delete_tree(&huff_counts->int_tree); huff_counts->tree_buff= NULL; DBUG_PRINT("fakebigcodes", ("freed distinct column values")); diff --git a/storage/myisam/rt_index.c b/storage/myisam/rt_index.c index 99080c22644..63ed60586d6 100644 --- a/storage/myisam/rt_index.c +++ b/storage/myisam/rt_index.c @@ -141,11 +141,11 @@ static int rtree_find_req(MI_INFO *info, MI_KEYDEF *keyinfo, uint search_flag, res = 1; ok: - my_afree((byte*)page_buf); + my_afree((uchar*)page_buf); return res; err1: - my_afree((byte*)page_buf); + my_afree((uchar*)page_buf); info->lastpos = HA_OFFSET_ERROR; return -1; } @@ -184,6 +184,7 @@ int rtree_find_first(MI_INFO *info, uint keynr, uchar *key, uint key_length, /* Save searched key, include data pointer. The data pointer is required if the search_flag contains MBR_DATA. 
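
Editor's note: for readers outside the spatial code, MBR is a minimum bounding rectangle, which the comment continuing below now spells out. A 2-D MBR is just two intervals, and R-tree descent keeps any page whose MBR overlaps the query MBR. A hypothetical layout, since the real key bytes are described by keysegs:

    struct mbr { double xmin, xmax, ymin, ymax; };

    /* Non-zero when the rectangles share any area (or touch). */
    static int mbr_overlaps(const struct mbr *a, const struct mbr *b)
    {
      return a->xmin <= b->xmax && b->xmin <= a->xmax &&
             a->ymin <= b->ymax && b->ymin <= a->ymax;
    }
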
+ (minimum bounding rectangle) */ memcpy(info->first_mbr_key, key, keyinfo->keylength); info->last_rkey_length = key_length; @@ -355,11 +356,11 @@ static int rtree_get_req(MI_INFO *info, MI_KEYDEF *keyinfo, uint key_length, res = 1; ok: - my_afree((byte*)page_buf); + my_afree((uchar*)page_buf); return res; err1: - my_afree((byte*)page_buf); + my_afree((uchar*)page_buf); info->lastpos = HA_OFFSET_ERROR; return -1; } @@ -538,16 +539,19 @@ static int rtree_insert_req(MI_INFO *info, MI_KEYDEF *keyinfo, uchar *key, uint nod_flag; uchar *page_buf; int res; + DBUG_ENTER("rtree_insert_req"); if (!(page_buf = (uchar*)my_alloca((uint)keyinfo->block_length + MI_MAX_KEY_BUFF))) { my_errno = HA_ERR_OUT_OF_MEM; - return -1; + DBUG_RETURN(-1); /* purecov: inspected */ } if (!_mi_fetch_keypage(info, keyinfo, page, DFLT_INIT_HITS, page_buf, 0)) goto err1; nod_flag = mi_test_if_nod(page_buf); + DBUG_PRINT("rtree", ("page: %lu level: %d ins_level: %d nod_flag: %u", + (ulong) page, level, ins_level, nod_flag)); if ((ins_level == -1 && nod_flag) || /* key: go down to leaf */ (ins_level > -1 && ins_level > level)) /* branch: go down to ins_level */ @@ -598,12 +602,12 @@ static int rtree_insert_req(MI_INFO *info, MI_KEYDEF *keyinfo, uchar *key, } ok: - my_afree((byte*)page_buf); - return res; + my_afree((uchar*)page_buf); + DBUG_RETURN(res); err1: - my_afree((byte*)page_buf); - return -1; + my_afree((uchar*)page_buf); + DBUG_RETURN(-1); /* purecov: inspected */ } @@ -623,20 +627,19 @@ static int rtree_insert_level(MI_INFO *info, uint keynr, uchar *key, MI_KEYDEF *keyinfo = info->s->keyinfo + keynr; int res; my_off_t new_page; - + DBUG_ENTER("rtree_insert_level"); + if ((old_root = info->s->state.key_root[keynr]) == HA_OFFSET_ERROR) { - int res; - if ((old_root = _mi_new(info, keyinfo, DFLT_INIT_HITS)) == HA_OFFSET_ERROR) - return -1; + DBUG_RETURN(-1); info->buff_used = 1; mi_putint(info->buff, 2, 0); res = rtree_add_key(info, keyinfo, key, key_length, info->buff, NULL); if (_mi_write_keypage(info, keyinfo, old_root, DFLT_INIT_HITS, info->buff)) - return 1; + DBUG_RETURN(1); info->s->state.key_root[keynr] = old_root; - return res; + DBUG_RETURN(res); } switch ((res = rtree_insert_req(info, keyinfo, key, key_length, @@ -653,11 +656,12 @@ static int rtree_insert_level(MI_INFO *info, uint keynr, uchar *key, uchar *new_key; uint nod_flag = info->s->base.key_reflength; + DBUG_PRINT("rtree", ("root was split, grow a new root")); if (!(new_root_buf = (uchar*)my_alloca((uint)keyinfo->block_length + MI_MAX_KEY_BUFF))) { my_errno = HA_ERR_OUT_OF_MEM; - return -1; + DBUG_RETURN(-1); /* purecov: inspected */ } mi_putint(new_root_buf, 2, nod_flag); @@ -683,12 +687,14 @@ static int rtree_insert_level(MI_INFO *info, uint keynr, uchar *key, DFLT_INIT_HITS, new_root_buf)) goto err1; info->s->state.key_root[keynr] = new_root; + DBUG_PRINT("rtree", ("new root page: %lu level: %d nod_flag: %u", + (ulong) new_root, 0, mi_test_if_nod(new_root_buf))); - my_afree((byte*)new_root_buf); + my_afree((uchar*)new_root_buf); break; err1: - my_afree((byte*)new_root_buf); - return -1; + my_afree((uchar*)new_root_buf); + DBUG_RETURN(-1); /* purecov: inspected */ } default: case -1: /* error */ @@ -696,7 +702,7 @@ err1: break; } } - return res; + DBUG_RETURN(res); } @@ -710,8 +716,10 @@ err1: int rtree_insert(MI_INFO *info, uint keynr, uchar *key, uint key_length) { - return (!key_length || - (rtree_insert_level(info, keynr, key, key_length, -1) == -1)) ? 
-1 : 0; + DBUG_ENTER("rtree_insert"); + DBUG_RETURN((!key_length || + (rtree_insert_level(info, keynr, key, key_length, -1) == -1)) ? + -1 : 0); } @@ -726,10 +734,12 @@ int rtree_insert(MI_INFO *info, uint keynr, uchar *key, uint key_length) static int rtree_fill_reinsert_list(stPageList *ReinsertList, my_off_t page, int level) { + DBUG_ENTER("rtree_fill_reinsert_list"); + DBUG_PRINT("rtree", ("page: %lu level: %d", (ulong) page, level)); if (ReinsertList->n_pages == ReinsertList->m_pages) { ReinsertList->m_pages += REINSERT_BUFFER_INC; - if (!(ReinsertList->pages = (stPageLevel*)my_realloc((gptr)ReinsertList->pages, + if (!(ReinsertList->pages = (stPageLevel*)my_realloc((uchar*)ReinsertList->pages, ReinsertList->m_pages * sizeof(stPageLevel), MYF(MY_ALLOW_ZERO_PTR)))) goto err1; } @@ -737,10 +747,10 @@ static int rtree_fill_reinsert_list(stPageList *ReinsertList, my_off_t page, ReinsertList->pages[ReinsertList->n_pages].offs = page; ReinsertList->pages[ReinsertList->n_pages].level = level; ReinsertList->n_pages++; - return 0; + DBUG_RETURN(0); err1: - return -1; + DBUG_RETURN(-1); /* purecov: inspected */ } @@ -764,15 +774,18 @@ static int rtree_delete_req(MI_INFO *info, MI_KEYDEF *keyinfo, uchar *key, uint nod_flag; uchar *page_buf; int res; + DBUG_ENTER("rtree_delete_req"); if (!(page_buf = (uchar*)my_alloca((uint)keyinfo->block_length))) { my_errno = HA_ERR_OUT_OF_MEM; - return -1; + DBUG_RETURN(-1); /* purecov: inspected */ } if (!_mi_fetch_keypage(info, keyinfo, page, DFLT_INIT_HITS, page_buf, 0)) goto err1; nod_flag = mi_test_if_nod(page_buf); + DBUG_PRINT("rtree", ("page: %lu level: %d nod_flag: %u", + (ulong) page, level, nod_flag)); k = rt_PAGE_FIRST_KEY(page_buf, nod_flag); last = rt_PAGE_END(page_buf); @@ -793,6 +806,7 @@ static int rtree_delete_req(MI_INFO *info, MI_KEYDEF *keyinfo, uchar *key, if (*page_size + key_length >= rt_PAGE_MIN_SIZE(keyinfo->block_length)) { /* OK */ + /* Calculate a new key value (MBR) for the shrunken block. */ if (rtree_set_key_mbr(info, keyinfo, k, key_length, _mi_kpos(nod_flag, k))) goto err1; @@ -802,10 +816,23 @@ static int rtree_delete_req(MI_INFO *info, MI_KEYDEF *keyinfo, uchar *key, } else { - /* too small: delete key & add it descendant to reinsert list */ + /* + Too small: delete key & add its descendant to reinsert list. + Store position and level of the block so that it can be + accessed later for inserting the remaining keys. + */ + DBUG_PRINT("rtree", ("too small. move block to reinsert list")); if (rtree_fill_reinsert_list(ReinsertList, _mi_kpos(nod_flag, k), level + 1)) goto err1; + /* + Delete the key that references the block. This makes the + block disappear from the index. Hence we need to insert + its remaining keys later. Note: if the block is a branch + block, we remove not only this block, but the whole + subtree. So we need to re-insert its keys on the same + level later to reintegrate the subtrees.
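  To make the two-phase scheme concrete, the overall flow that
  rtree_delete() ends up driving is roughly the following (a
  simplified sketch with hypothetical helpers fetch/first_key/
  next_key/page_end/dispose, not the literal code):

    rtree_delete_req(info, keyinfo, key, root, &ReinsertList);
    for (i= 0; i < ReinsertList.n_pages; i++)
    {
      page= fetch(ReinsertList.pages[i].offs);
      for (k= first_key(page); k < page_end(page); k= next_key(k))
        rtree_insert_level(info, keynr, k, key_length,
                           ReinsertList.pages[i].level);
      dispose(page);
    }

  If one of these reinserts splits the root, the tree grows one
  level, so the levels recorded for the remaining list entries are
  incremented to keep them pointing at the same depth.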
+ */ rtree_delete_key(info, page_buf, k, key_length, nod_flag); if (_mi_write_keypage(info, keyinfo, page, DFLT_INIT_HITS, page_buf)) @@ -864,12 +891,12 @@ static int rtree_delete_req(MI_INFO *info, MI_KEYDEF *keyinfo, uchar *key, res = 1; ok: - my_afree((byte*)page_buf); - return res; + my_afree((uchar*)page_buf); + DBUG_RETURN(res); err1: - my_afree((byte*)page_buf); - return -1; + my_afree((uchar*)page_buf); + DBUG_RETURN(-1); /* purecov: inspected */ } @@ -887,12 +914,15 @@ int rtree_delete(MI_INFO *info, uint keynr, uchar *key, uint key_length) stPageList ReinsertList; my_off_t old_root; MI_KEYDEF *keyinfo = info->s->keyinfo + keynr; + DBUG_ENTER("rtree_delete"); if ((old_root = info->s->state.key_root[keynr]) == HA_OFFSET_ERROR) { my_errno= HA_ERR_END_OF_FILE; - return -1; + DBUG_RETURN(-1); /* purecov: inspected */ } + DBUG_PRINT("rtree", ("starting deletion at root page: %lu", + (ulong) old_root)); ReinsertList.pages = NULL; ReinsertList.n_pages = 0; @@ -901,19 +931,18 @@ int rtree_delete(MI_INFO *info, uint keynr, uchar *key, uint key_length) switch (rtree_delete_req(info, keyinfo, key, key_length, old_root, &page_size, &ReinsertList, 0)) { - case 2: + case 2: /* empty */ { info->s->state.key_root[keynr] = HA_OFFSET_ERROR; - return 0; + DBUG_RETURN(0); } - case 0: + case 0: /* deleted */ { uint nod_flag; ulong i; for (i = 0; i < ReinsertList.n_pages; ++i) { uchar *page_buf; - uint nod_flag; uchar *k; uchar *last; @@ -926,24 +955,42 @@ int rtree_delete(MI_INFO *info, uint keynr, uchar *key, uint key_length) DFLT_INIT_HITS, page_buf, 0)) goto err1; nod_flag = mi_test_if_nod(page_buf); + DBUG_PRINT("rtree", ("reinserting keys from " + "page: %lu level: %d nod_flag: %u", + (ulong) ReinsertList.pages[i].offs, + ReinsertList.pages[i].level, nod_flag)); + k = rt_PAGE_FIRST_KEY(page_buf, nod_flag); last = rt_PAGE_END(page_buf); for (; k < last; k = rt_PAGE_NEXT_KEY(k, key_length, nod_flag)) { - if (rtree_insert_level(info, keynr, k, key_length, - ReinsertList.pages[i].level) == -1) + int res; + if ((res= rtree_insert_level(info, keynr, k, key_length, + ReinsertList.pages[i].level)) == -1) { - my_afree((byte*)page_buf); + my_afree((uchar*)page_buf); goto err1; } + if (res) + { + ulong j; + DBUG_PRINT("rtree", ("root has been split, adjust levels")); + for (j= i; j < ReinsertList.n_pages; j++) + { + ReinsertList.pages[j].level++; + DBUG_PRINT("rtree", ("keys from page: %lu now level: %d", + (ulong) ReinsertList.pages[i].offs, + ReinsertList.pages[i].level)); + } + } } - my_afree((byte*)page_buf); + my_afree((uchar*)page_buf); if (_mi_dispose(info, keyinfo, ReinsertList.pages[i].offs, DFLT_INIT_HITS)) goto err1; } if (ReinsertList.pages) - my_free((byte*) ReinsertList.pages, MYF(0)); + my_free((uchar*) ReinsertList.pages, MYF(0)); /* check for redundant root (not leaf, 1 child) and eliminate */ if ((old_root = info->s->state.key_root[keynr]) == HA_OFFSET_ERROR) @@ -962,20 +1009,20 @@ int rtree_delete(MI_INFO *info, uint keynr, uchar *key, uint key_length) info->s->state.key_root[keynr] = new_root; } info->update= HA_STATE_DELETED; - return 0; + DBUG_RETURN(0); err1: - return -1; + DBUG_RETURN(-1); /* purecov: inspected */ } case 1: /* not found */ { my_errno = HA_ERR_KEY_NOT_FOUND; - return -1; + DBUG_RETURN(-1); /* purecov: inspected */ } default: case -1: /* error */ { - return -1; + DBUG_RETURN(-1); /* purecov: inspected */ } } } @@ -1070,11 +1117,11 @@ ha_rows rtree_estimate(MI_INFO *info, uint keynr, uchar *key, res = HA_POS_ERROR; } - my_afree((byte*)page_buf); + 
my_afree((uchar*)page_buf); return res; err1: - my_afree((byte*)page_buf); + my_afree((uchar*)page_buf); return HA_POS_ERROR; } diff --git a/storage/myisam/rt_key.c b/storage/myisam/rt_key.c index cb6a82c51f6..fe59af3c605 100644 --- a/storage/myisam/rt_key.c +++ b/storage/myisam/rt_key.c @@ -34,6 +34,7 @@ int rtree_add_key(MI_INFO *info, MI_KEYDEF *keyinfo, uchar *key, { uint page_size = mi_getint(page_buf); uint nod_flag = mi_test_if_nod(page_buf); + DBUG_ENTER("rtree_add_key"); if (page_size + key_length + info->s->base.rec_reflength <= keyinfo->block_length) @@ -42,22 +43,26 @@ int rtree_add_key(MI_INFO *info, MI_KEYDEF *keyinfo, uchar *key, if (nod_flag) { /* save key */ + DBUG_ASSERT(_mi_kpos(nod_flag, key) < info->state->key_file_length); memcpy(rt_PAGE_END(page_buf), key - nod_flag, key_length + nod_flag); page_size += key_length + nod_flag; } else { /* save key */ + DBUG_ASSERT(_mi_dpos(info, nod_flag, key + key_length + + info->s->base.rec_reflength) < + info->state->data_file_length + info->s->base.pack_reclength); memcpy(rt_PAGE_END(page_buf), key, key_length + info->s->base.rec_reflength); page_size += key_length + info->s->base.rec_reflength; } mi_putint(page_buf, page_size, nod_flag); - return 0; + DBUG_RETURN(0); } - return (rtree_split_page(info, keyinfo, page_buf, key, key_length, - new_page) ? -1 : 1); + DBUG_RETURN((rtree_split_page(info, keyinfo, page_buf, key, key_length, + new_page) ? -1 : 1)); } /* @@ -89,11 +94,13 @@ int rtree_delete_key(MI_INFO *info, uchar *page_buf, uchar *key, int rtree_set_key_mbr(MI_INFO *info, MI_KEYDEF *keyinfo, uchar *key, uint key_length, my_off_t child_page) { + DBUG_ENTER("rtree_set_key_mbr"); + if (!_mi_fetch_keypage(info, keyinfo, child_page, DFLT_INIT_HITS, info->buff, 0)) - return -1; + DBUG_RETURN(-1); /* purecov: inspected */ - return rtree_page_mbr(info, keyinfo->seg, info->buff, key, key_length); + DBUG_RETURN(rtree_page_mbr(info, keyinfo->seg, info->buff, key, key_length)); } #endif /*HAVE_RTREE_KEYS*/ diff --git a/storage/myisam/rt_split.c b/storage/myisam/rt_split.c index 9f25ee608d8..ef988dbd048 100644 --- a/storage/myisam/rt_split.c +++ b/storage/myisam/rt_split.c @@ -264,13 +264,15 @@ int rtree_split_page(MI_INFO *info, MI_KEYDEF *keyinfo, uchar *page, uchar *key, uint full_length= key_length + (nod_flag ? 
nod_flag : info->s->base.rec_reflength); int max_keys= (mi_getint(page)-2) / (full_length); + DBUG_ENTER("rtree_split_page"); + DBUG_PRINT("rtree", ("splitting block")); n_dim = keyinfo->keysegs / 2; if (!(coord_buf= (double*) my_alloca(n_dim * 2 * sizeof(double) * (max_keys + 1 + 4) + sizeof(SplitStruct) * (max_keys + 1)))) - return -1; + DBUG_RETURN(-1); /* purecov: inspected */ task= (SplitStruct *)(coord_buf + n_dim * 2 * (max_keys + 1 + 4)); @@ -341,12 +343,13 @@ int rtree_split_page(MI_INFO *info, MI_KEYDEF *keyinfo, uchar *page, uchar *key, else err_code= _mi_write_keypage(info, keyinfo, *new_page_offs, DFLT_INIT_HITS, new_page); + DBUG_PRINT("rtree", ("split new block: %lu", (ulong) *new_page_offs)); - my_afree((byte*)new_page); + my_afree((uchar*)new_page); split_err: - my_afree((byte*) coord_buf); - return err_code; + my_afree((uchar*) coord_buf); + DBUG_RETURN(err_code); } #endif /*HAVE_RTREE_KEYS*/ diff --git a/storage/myisam/rt_test.c b/storage/myisam/rt_test.c index 1126266d2f9..7d15afd12ef 100644 --- a/storage/myisam/rt_test.c +++ b/storage/myisam/rt_test.c @@ -28,9 +28,9 @@ #define KEYALG HA_KEY_ALG_RTREE static int read_with_pos(MI_INFO * file, int silent); -static void create_record(char *record,uint rownr); -static void create_record1(char *record,uint rownr); -static void print_record(char * record,my_off_t offs,const char * tail); +static void create_record(uchar *record,uint rownr); +static void create_record1(uchar *record,uint rownr); +static void print_record(uchar * record,my_off_t offs,const char * tail); static int run_test(const char *filename); static double rt_data[]= @@ -108,8 +108,8 @@ static int run_test(const char *filename) int i; int error; int row_count=0; - char record[MAX_REC_LENGTH]; - char read_record[MAX_REC_LENGTH]; + uchar record[MAX_REC_LENGTH]; + uchar read_record[MAX_REC_LENGTH]; int upd= 10; ha_rows hrows; @@ -323,7 +323,7 @@ static int run_test(const char *filename) range.key= record+1; range.length= 1000; /* Big enough */ range.flag= HA_READ_MBR_INTERSECT; - hrows= mi_records_in_range(file,0, &range, (key_range*) 0); + hrows= mi_records_in_range(file, 0, &range, (key_range*) 0); printf(" %ld rows\n", (long) hrows); if (mi_close(file)) goto err; @@ -342,7 +342,7 @@ static int read_with_pos (MI_INFO * file,int silent) { int error; int i; - char read_record[MAX_REC_LENGTH]; + uchar read_record[MAX_REC_LENGTH]; if (!silent) printf("- Reading rows with position\n"); @@ -385,12 +385,12 @@ static void bprint_record(char * record, #endif -static void print_record(char * record, +static void print_record(uchar * record, my_off_t offs __attribute__((unused)), const char * tail) { int i; - char * pos; + uchar * pos; double c; printf(" rec=(%d)",(unsigned char)record[0]); @@ -407,16 +407,16 @@ static void print_record(char * record, -static void create_record1(char *record,uint rownr) +static void create_record1(uchar *record,uint rownr) { int i; - char * pos; + uchar * pos; double c=rownr+10; bzero((char*) record,MAX_REC_LENGTH); record[0]=0x01; /* DEL marker */ - for ( pos=record+1, i=0; i<2*ndims; i++) + for (pos=record+1, i=0; i<2*ndims; i++) { memcpy(pos,&c,sizeof(c)); float8store(pos,c); @@ -426,7 +426,7 @@ static void create_record1(char *record,uint rownr) #ifdef NOT_USED -static void create_record0(char *record,uint rownr) +static void create_record0(uchar *record,uint rownr) { int i; char * pos; @@ -449,16 +449,16 @@ static void create_record0(char *record,uint rownr) #endif -static void create_record(char *record,uint rownr) +static void 
create_record(uchar *record,uint rownr) { int i; - char *pos; + uchar *pos; double *data= rt_data+rownr*4; record[0]=0x01; /* DEL marker */ - for ( pos=record+1, i=0; i<ndims*2; i++) + for (pos=record+1, i=0; i<ndims*2; i++) { - float8store(pos,data[i]); - pos+=8; + float8store(pos,data[i]); + pos+=8; } } diff --git a/storage/myisam/sort.c b/storage/myisam/sort.c index bc37e0291d2..27e4bd37af7 100644 --- a/storage/myisam/sort.c +++ b/storage/myisam/sort.c @@ -78,13 +78,13 @@ static int NEAR_F write_keys_varlen(MI_SORT_PARAM *info,uchar **sort_keys, static uint NEAR_F read_to_buffer_varlen(IO_CACHE *fromfile,BUFFPEK *buffpek, uint sort_length); static int NEAR_F write_merge_key(MI_SORT_PARAM *info, IO_CACHE *to_file, - char *key, uint sort_length, uint count); + uchar *key, uint sort_length, uint count); static int NEAR_F write_merge_key_varlen(MI_SORT_PARAM *info, IO_CACHE *to_file, - char* key, uint sort_length, + uchar* key, uint sort_length, uint count); static inline int -my_var_write(MI_SORT_PARAM *info, IO_CACHE *to_file, byte *bufs); +my_var_write(MI_SORT_PARAM *info, IO_CACHE *to_file, uchar *bufs); /* Creates a index of sorted keys @@ -116,7 +116,7 @@ int _create_index_by_sort(MI_SORT_PARAM *info,my_bool no_messages, { info->write_keys=write_keys_varlen; info->read_to_buffer=read_to_buffer_varlen; - info->write_key=write_merge_key_varlen; + info->write_key= write_merge_key_varlen; } else { @@ -138,8 +138,9 @@ int _create_index_by_sort(MI_SORT_PARAM *info,my_bool no_messages, while (memavl >= MIN_SORT_MEMORY) { - if ((my_off_t) (records+1)*(sort_length+sizeof(char*)) <= - (my_off_t) memavl) + if ((records < UINT_MAX32) && + ((my_off_t) (records + 1) * + (sort_length + sizeof(char*)) <= (my_off_t) memavl)) keys= records+1; else do @@ -151,7 +152,7 @@ int _create_index_by_sort(MI_SORT_PARAM *info,my_bool no_messages, keys < (uint) maxbuffer) { mi_check_print_error(info->sort_info->param, - "sort_buffer_size is to small"); + "myisam_sort_buffer_size is too small"); goto err; } } @@ -163,7 +164,7 @@ int _create_index_by_sort(MI_SORT_PARAM *info,my_bool no_messages, if (my_init_dynamic_array(&buffpek, sizeof(BUFFPEK), maxbuffer, maxbuffer/2)) { - my_free((gptr) sort_keys,MYF(0)); + my_free((uchar*) sort_keys,MYF(0)); sort_keys= 0; } else @@ -175,7 +176,7 @@ int _create_index_by_sort(MI_SORT_PARAM *info,my_bool no_messages, } if (memavl < MIN_SORT_MEMORY) { - mi_check_print_error(info->sort_info->param,"Sort buffer to small"); /* purecov: tested */ + mi_check_print_error(info->sort_info->param,"MyISAM sort buffer too small"); /* purecov: tested */ goto err; /* purecov: tested */ } (*info->lock_in_memory)(info->sort_info->param);/* Everything is allocated */ @@ -220,9 +221,9 @@ int _create_index_by_sort(MI_SORT_PARAM *info,my_bool no_messages, if (my_b_inited(&tempfile_for_exceptions)) { - MI_INFO *index=info->sort_info->info; + MI_INFO *idx=info->sort_info->info; uint keyno=info->key; - uint key_length, ref_length=index->s->rec_reflength; + uint key_length, ref_length=idx->s->rec_reflength; if (!no_messages) printf(" - Adding exceptions\n"); /* purecov: tested */ @@ -230,12 +231,12 @@ int _create_index_by_sort(MI_SORT_PARAM *info,my_bool no_messages, reinit_io_cache(&tempfile_for_exceptions,READ_CACHE,0L,0,0)) goto err; - while (!my_b_read(&tempfile_for_exceptions,(byte*)&key_length, + while (!my_b_read(&tempfile_for_exceptions,(uchar*)&key_length, sizeof(key_length)) - && !my_b_read(&tempfile_for_exceptions,(byte*)sort_keys, + && !my_b_read(&tempfile_for_exceptions,(uchar*)sort_keys, 
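/*
   A note on what this loop consumes: keys that could not go through
   the normal sort/merge path were appended to tempfile_for_exceptions
   as (length, key) pairs. The producer side, write_key() in this
   file, does essentially:

     my_b_write(tempfile, (uchar*) &key_length, sizeof(key_length));
     my_b_write(tempfile, (uchar*) key, (uint) key_length);

   Each pair read back here is handed straight to _mi_ck_write(),
   bypassing the merge step entirely.
*/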
(uint) key_length)) { - if (_mi_ck_write(index,keyno,(uchar*) sort_keys,key_length-ref_length)) + if (_mi_ck_write(idx,keyno,(uchar*) sort_keys,key_length-ref_length)) goto err; } } @@ -244,7 +245,7 @@ int _create_index_by_sort(MI_SORT_PARAM *info,my_bool no_messages, err: if (sort_keys) - my_free((gptr) sort_keys,MYF(0)); + my_free((uchar*) sort_keys,MYF(0)); delete_dynamic(&buffpek); close_cached_file(&tempfile); close_cached_file(&tempfile_for_exceptions); @@ -369,7 +370,7 @@ pthread_handler_t thr_find_all_keys(void *arg) keys < (uint) maxbuffer) { mi_check_print_error(sort_param->sort_info->param, - "sort_buffer_size is to small"); + "myisam_sort_buffer_size is too small"); goto err; } } @@ -383,7 +384,7 @@ pthread_handler_t thr_find_all_keys(void *arg) if (my_init_dynamic_array(&sort_param->buffpek, sizeof(BUFFPEK), maxbuffer, maxbuffer/2)) { - my_free((gptr) sort_keys,MYF(0)); + my_free((uchar*) sort_keys,MYF(0)); sort_keys= (uchar **) NULL; /* for err: label */ } else @@ -397,7 +398,7 @@ pthread_handler_t thr_find_all_keys(void *arg) if (memavl < MIN_SORT_MEMORY) { mi_check_print_error(sort_param->sort_info->param, - "Sort buffer too small"); + "MyISAM sort buffer too small"); goto err; /* purecov: tested */ } @@ -453,7 +454,7 @@ err: DBUG_PRINT("error", ("got some error")); sort_param->sort_info->got_error= 1; /* no need to protect with a mutex */ if (sort_keys) - my_free((gptr) sort_keys,MYF(0)); + my_free((uchar*) sort_keys,MYF(0)); sort_param->sort_keys= 0; delete_dynamic(& sort_param->buffpek); close_cached_file(&sort_param->tempfile); @@ -495,7 +496,7 @@ int thr_write_keys(MI_SORT_PARAM *sort_param) MI_INFO *info=sort_info->info; MYISAM_SHARE *share=info->s; MI_SORT_PARAM *sinfo; - byte *mergebuf=0; + uchar *mergebuf=0; DBUG_ENTER("thr_write_keys"); LINT_INIT(length); @@ -530,7 +531,7 @@ int thr_write_keys(MI_SORT_PARAM *sort_param) sinfo->notnull: NULL, (ulonglong) info->state->records); } - my_free((gptr) sinfo->sort_keys,MYF(0)); + my_free((uchar*) sinfo->sort_keys,MYF(0)); my_free(mi_get_rec_buff_ptr(info, sinfo->rec_buff), MYF(MY_ALLOW_ZERO_PTR)); sinfo->sort_keys=0; @@ -621,12 +622,12 @@ int thr_write_keys(MI_SORT_PARAM *sort_param) } while (!got_error && - !my_b_read(&sinfo->tempfile_for_exceptions,(byte*)&key_length, + !my_b_read(&sinfo->tempfile_for_exceptions,(uchar*)&key_length, sizeof(key_length))) { - byte ft_buf[HA_FT_MAXBYTELEN + HA_FT_WLEN + 10]; + uchar ft_buf[HA_FT_MAXBYTELEN + HA_FT_WLEN + 10]; if (key_length > sizeof(ft_buf) || - my_b_read(&sinfo->tempfile_for_exceptions, (byte*)ft_buf, + my_b_read(&sinfo->tempfile_for_exceptions, (uchar*)ft_buf, (uint)key_length) || _mi_ck_write(info, sinfo->key, (uchar*)ft_buf, key_length - info->s->rec_reflength)) @@ -634,7 +635,7 @@ int thr_write_keys(MI_SORT_PARAM *sort_param) } } } - my_free((gptr) mergebuf,MYF(MY_ALLOW_ZERO_PTR)); + my_free((uchar*) mergebuf,MYF(MY_ALLOW_ZERO_PTR)); DBUG_RETURN(got_error); } #endif /* THREAD */ @@ -648,7 +649,7 @@ static int NEAR_F write_keys(MI_SORT_PARAM *info, register uchar **sort_keys, uint sort_length=info->key_length; DBUG_ENTER("write_keys"); - qsort2((byte*) sort_keys,count,sizeof(byte*),(qsort2_cmp) info->key_cmp, + qsort2((uchar*) sort_keys,count,sizeof(uchar*),(qsort2_cmp) info->key_cmp, info); if (!my_b_inited(tempfile) && open_cached_file(tempfile, my_tmpdir(info->tmpdir), "ST", @@ -660,7 +661,7 @@ static int NEAR_F write_keys(MI_SORT_PARAM *info, register uchar **sort_keys, for (end=sort_keys+count ; sort_keys != end ; sort_keys++) { - if (my_b_write(tempfile,(byte*) 
*sort_keys,(uint) sort_length)) + if (my_b_write(tempfile,(uchar*) *sort_keys,(uint) sort_length)) DBUG_RETURN(1); /* purecov: inspected */ } DBUG_RETURN(0); @@ -668,13 +669,13 @@ static int NEAR_F write_keys(MI_SORT_PARAM *info, register uchar **sort_keys, static inline int -my_var_write(MI_SORT_PARAM *info, IO_CACHE *to_file, byte *bufs) +my_var_write(MI_SORT_PARAM *info, IO_CACHE *to_file, uchar *bufs) { int err; uint16 len = _mi_keylength(info->keyinfo, (uchar*) bufs); /* The following is safe as this is a local file */ - if ((err= my_b_write(to_file, (byte*)&len, sizeof(len)))) + if ((err= my_b_write(to_file, (uchar*)&len, sizeof(len)))) return (err); if ((err= my_b_write(to_file,bufs, (uint) len))) return (err); @@ -691,7 +692,7 @@ static int NEAR_F write_keys_varlen(MI_SORT_PARAM *info, int err; DBUG_ENTER("write_keys_varlen"); - qsort2((byte*) sort_keys,count,sizeof(byte*),(qsort2_cmp) info->key_cmp, + qsort2((uchar*) sort_keys,count,sizeof(uchar*),(qsort2_cmp) info->key_cmp, info); if (!my_b_inited(tempfile) && open_cached_file(tempfile, my_tmpdir(info->tmpdir), "ST", @@ -702,7 +703,7 @@ static int NEAR_F write_keys_varlen(MI_SORT_PARAM *info, buffpek->count=count; for (end=sort_keys+count ; sort_keys != end ; sort_keys++) { - if ((err= my_var_write(info,tempfile, (byte*) *sort_keys))) + if ((err= my_var_write(info,tempfile, (uchar*) *sort_keys))) DBUG_RETURN(err); } DBUG_RETURN(0); @@ -720,8 +721,8 @@ static int NEAR_F write_key(MI_SORT_PARAM *info, uchar *key, DISK_BUFFER_SIZE, info->sort_info->param->myf_rw)) DBUG_RETURN(1); - if (my_b_write(tempfile,(byte*)&key_length,sizeof(key_length)) || - my_b_write(tempfile,(byte*)key,(uint) key_length)) + if (my_b_write(tempfile,(uchar*)&key_length,sizeof(key_length)) || + my_b_write(tempfile,(uchar*)key,(uint) key_length)) DBUG_RETURN(1); DBUG_RETURN(0); } /* write_key */ @@ -734,7 +735,7 @@ static int NEAR_F write_index(MI_SORT_PARAM *info, register uchar **sort_keys, { DBUG_ENTER("write_index"); - qsort2((gptr) sort_keys,(size_t) count,sizeof(byte*), + qsort2((uchar*) sort_keys,(size_t) count,sizeof(uchar*), (qsort2_cmp) info->key_cmp,info); while (count--) { @@ -773,7 +774,7 @@ static int NEAR_F merge_many_buff(MI_SORT_PARAM *info, uint keys, { if (merge_buffers(info,keys,from_file,to_file,sort_keys,lastbuff++, buffpek+i,buffpek+i+MERGEBUFF-1)) - break; /* purecov: inspected */ + goto cleanup; } if (merge_buffers(info,keys,from_file,to_file,sort_keys,lastbuff++, buffpek+i,buffpek+ *maxbuffer)) @@ -783,6 +784,7 @@ static int NEAR_F merge_many_buff(MI_SORT_PARAM *info, uint keys, temp=from_file; from_file=to_file; to_file=temp; *maxbuffer= (int) (lastbuff-buffpek)-1; } +cleanup: close_cached_file(to_file); /* This holds old result */ if (to_file == t_file) *t_file=t_file2; /* Copy result file */ @@ -812,7 +814,7 @@ static uint NEAR_F read_to_buffer(IO_CACHE *fromfile, BUFFPEK *buffpek, if ((count=(uint) min((ha_rows) buffpek->max_keys,buffpek->count))) { - if (my_pread(fromfile->file,(byte*) buffpek->base, + if (my_pread(fromfile->file,(uchar*) buffpek->base, (length= sort_length*count),buffpek->file_pos,MYF_RW)) return((uint) -1); /* purecov: inspected */ buffpek->key=buffpek->base; @@ -837,11 +839,11 @@ static uint NEAR_F read_to_buffer_varlen(IO_CACHE *fromfile, BUFFPEK *buffpek, for (idx=1;idx<=count;idx++) { - if (my_pread(fromfile->file,(byte*)&length_of_key,sizeof(length_of_key), + if (my_pread(fromfile->file,(uchar*)&length_of_key,sizeof(length_of_key), buffpek->file_pos,MYF_RW)) return((uint) -1); 
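/*
   On-disk format consumed here: with variable-length keys, each run
   in the temporary file is a sequence of (uint16 length, key bytes)
   records produced by my_var_write(). One record is read back with
   two my_pread() calls, and since my_pread() does not advance the
   file position, buffpek->file_pos is stepped by hand. A condensed
   sketch of one iteration (error handling omitted):

     uint16 len;
     my_pread(fromfile->file, (uchar*) &len, sizeof(len),
              buffpek->file_pos, MYF_RW);
     buffpek->file_pos+= sizeof(len);
     my_pread(fromfile->file, buffp, len, buffpek->file_pos, MYF_RW);
     buffpek->file_pos+= len;
*/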
buffpek->file_pos+=sizeof(length_of_key); - if (my_pread(fromfile->file,(byte*) buffp,length_of_key, + if (my_pread(fromfile->file,(uchar*) buffp,length_of_key, buffpek->file_pos,MYF_RW)) return((uint) -1); buffpek->file_pos+=length_of_key; @@ -856,16 +858,16 @@ static uint NEAR_F read_to_buffer_varlen(IO_CACHE *fromfile, BUFFPEK *buffpek, static int NEAR_F write_merge_key_varlen(MI_SORT_PARAM *info, - IO_CACHE *to_file,char* key, + IO_CACHE *to_file, uchar* key, uint sort_length, uint count) { uint idx; + uchar *bufs = key; - char *bufs = key; for (idx=1;idx<=count;idx++) { int err; - if ((err= my_var_write(info,to_file, (byte*) bufs))) + if ((err= my_var_write(info, to_file, bufs))) return (err); bufs=bufs+sort_length; } @@ -874,10 +876,10 @@ static int NEAR_F write_merge_key_varlen(MI_SORT_PARAM *info, static int NEAR_F write_merge_key(MI_SORT_PARAM *info __attribute__((unused)), - IO_CACHE *to_file, char* key, + IO_CACHE *to_file, uchar *key, uint sort_length, uint count) { - return my_b_write(to_file,(byte*) key,(uint) sort_length*count); + return my_b_write(to_file, key, (size_t) sort_length*count); } /* @@ -909,7 +911,7 @@ merge_buffers(MI_SORT_PARAM *info, uint keys, IO_CACHE *from_file, sort_length=info->key_length; if (init_queue(&queue,(uint) (Tb-Fb)+1,offsetof(BUFFPEK,key),0, - (int (*)(void*, byte *,byte*)) info->key_cmp, + (int (*)(void*, uchar *,uchar*)) info->key_cmp, (void*) info)) DBUG_RETURN(1); /* purecov: inspected */ @@ -922,7 +924,7 @@ merge_buffers(MI_SORT_PARAM *info, uint keys, IO_CACHE *from_file, sort_length)); if (error == -1) goto err; /* purecov: inspected */ - queue_insert(&queue,(char*) buffpek); + queue_insert(&queue,(uchar*) buffpek); } while (queue.elements > 1) @@ -936,7 +938,7 @@ merge_buffers(MI_SORT_PARAM *info, uint keys, IO_CACHE *from_file, buffpek=(BUFFPEK*) queue_top(&queue); if (to_file) { - if (info->write_key(info,to_file,(byte*) buffpek->key, + if (info->write_key(info,to_file,(uchar*) buffpek->key, (uint) sort_length,1)) { error=1; goto err; /* purecov: inspected */ @@ -992,7 +994,7 @@ merge_buffers(MI_SORT_PARAM *info, uint keys, IO_CACHE *from_file, { if (to_file) { - if (info->write_key(info,to_file,(byte*) buffpek->key, + if (info->write_key(info,to_file,(uchar*) buffpek->key, sort_length,buffpek->mem_count)) { error=1; goto err; /* purecov: inspected */ @@ -1045,7 +1047,7 @@ flush_ft_buf(MI_SORT_PARAM *info) if (info->sort_info->ft_buf) { err=sort_ft_buf_flush(info); - my_free((gptr)info->sort_info->ft_buf, MYF(0)); + my_free((uchar*)info->sort_info->ft_buf, MYF(0)); info->sort_info->ft_buf=0; } return err; diff --git a/storage/myisam/sp_defs.h b/storage/myisam/sp_defs.h index 11254d16c97..187ec62b2a3 100644 --- a/storage/myisam/sp_defs.h +++ b/storage/myisam/sp_defs.h @@ -40,7 +40,7 @@ enum wkbByteOrder }; uint sp_make_key(register MI_INFO *info, uint keynr, uchar *key, - const byte *record, my_off_t filepos); + const uchar *record, my_off_t filepos); #endif /*HAVE_SPATIAL*/ #endif /* _SP_DEFS_H */ diff --git a/storage/myisam/sp_key.c b/storage/myisam/sp_key.c index 34c96a219c7..3748a38ff81 100644 --- a/storage/myisam/sp_key.c +++ b/storage/myisam/sp_key.c @@ -31,25 +31,20 @@ static int sp_get_geometry_mbr(uchar *(*wkb), uchar *end, uint n_dims, double *mbr, int top); static int sp_mbr_from_wkb(uchar (*wkb), uint size, uint n_dims, double *mbr); -static void get_double(double *d, const byte *pos) -{ - float8get(*d, pos); -} - uint sp_make_key(register MI_INFO *info, uint keynr, uchar *key, - const byte *record, my_off_t filepos) + 
const uchar *record, my_off_t filepos) { HA_KEYSEG *keyseg; MI_KEYDEF *keyinfo = &info->s->keyinfo[keynr]; uint len = 0; - byte *pos; + uchar *pos; uint dlen; uchar *dptr; double mbr[SPDIMS * 2]; uint i; keyseg = &keyinfo->seg[-1]; - pos = (byte*)record + keyseg->start; + pos = (uchar*)record + keyseg->start; dlen = _mi_calc_blob_length(keyseg->bit_start, pos); memcpy_fixed(&dptr, pos + keyseg->bit_start, sizeof(char*)); @@ -62,48 +57,40 @@ uint sp_make_key(register MI_INFO *info, uint keynr, uchar *key, for (i = 0, keyseg = keyinfo->seg; keyseg->type; keyseg++, i++) { - uint length = keyseg->length; + uint length = keyseg->length, start= keyseg->start; + double val; + + DBUG_ASSERT(length == sizeof(double)); + DBUG_ASSERT(!(start % sizeof(double))); + DBUG_ASSERT(start < sizeof(mbr)); + DBUG_ASSERT(keyseg->type == HA_KEYTYPE_DOUBLE); - pos = ((byte*)mbr) + keyseg->start; - if (keyseg->flag & HA_SWAP_KEY) - { + val= mbr[start / sizeof (double)]; #ifdef HAVE_ISNAN - if (keyseg->type == HA_KEYTYPE_FLOAT) - { - float nr; - float4get(nr, pos); - if (isnan(nr)) - { - /* Replace NAN with zero */ - bzero(key, length); - key+= length; - continue; - } - } - else if (keyseg->type == HA_KEYTYPE_DOUBLE) - { - double nr; - get_double(&nr, pos); - if (isnan(nr)) - { - bzero(key, length); - key+= length; - continue; - } - } + if (isnan(val)) + { + bzero(key, length); + key+= length; + len+= length; + continue; + } #endif - pos += length; - while (length--) - { + + if (keyseg->flag & HA_SWAP_KEY) + { + uchar buf[sizeof(double)]; + + float8store(buf, val); + pos= &buf[length]; + while (pos > buf) *key++ = *--pos; - } } else { - memcpy((byte*)key, pos, length); - key += keyseg->length; + float8store((uchar *)key, val); + key += length; } - len += keyseg->length; + len+= length; } _mi_dpointer(info, key, filepos); return len; @@ -141,13 +128,13 @@ static int sp_add_point_to_mbr(uchar *(*wkb), uchar *end, uint n_dims, { if ((*wkb) > end - 8) return -1; - get_double(&ord, (const byte*) *wkb); + float8get(ord, (const uchar*) *wkb); (*wkb)+= 8; if (ord < *mbr) - float8store((char*) mbr, ord); + *mbr= ord; mbr++; if (ord > *mbr) - float8store((char*) mbr, ord); + *mbr= ord; mbr++; } return 0; diff --git a/storage/myisam/sp_test.c b/storage/myisam/sp_test.c index c7226589811..dee32ba423e 100644 --- a/storage/myisam/sp_test.c +++ b/storage/myisam/sp_test.c @@ -24,11 +24,11 @@ #define MAX_REC_LENGTH 1024 #define KEYALG HA_KEY_ALG_RTREE -static void create_linestring(char *record,uint rownr); -static void print_record(char * record,my_off_t offs,const char * tail); +static void create_linestring(uchar *record,uint rownr); +static void print_record(uchar * record,my_off_t offs,const char * tail); -static void create_key(char *key,uint rownr); -static void print_key(const char *key,const char * tail); +static void create_key(uchar *key,uint rownr); +static void print_key(const uchar *key,const char * tail); static int run_test(const char *filename); static int read_with_pos(MI_INFO * file, int silent); @@ -64,9 +64,9 @@ int run_test(const char *filename) int i; int error; int row_count=0; - char record[MAX_REC_LENGTH]; - char key[MAX_REC_LENGTH]; - char read_record[MAX_REC_LENGTH]; + uchar record[MAX_REC_LENGTH]; + uchar key[MAX_REC_LENGTH]; + uchar read_record[MAX_REC_LENGTH]; int upd=10; ha_rows hrows; @@ -255,7 +255,7 @@ int run_test(const char *filename) max_range.key= record+1; max_range.length= 1000; /* Big enough */ max_range.flag= HA_READ_KEY_EXACT; - hrows= mi_records_in_range(file,0, &min_range, &max_range); 
+ hrows= mi_records_in_range(file, 0, &min_range, &max_range); printf(" %ld rows\n", (long) hrows); if (mi_close(file)) goto err; @@ -272,7 +272,7 @@ static int read_with_pos (MI_INFO * file,int silent) { int error; int i; - char read_record[MAX_REC_LENGTH]; + uchar read_record[MAX_REC_LENGTH]; int rows=0; if (!silent) @@ -300,7 +300,7 @@ static int read_with_pos (MI_INFO * file,int silent) #ifdef NOT_USED -static void bprint_record(char * record, +static void bprint_record(uchar * record, my_off_t offs __attribute__((unused)), const char * tail) { @@ -319,9 +319,9 @@ static void bprint_record(char * record, #endif -static void print_record(char * record, my_off_t offs,const char * tail) +static void print_record(uchar * record, my_off_t offs,const char * tail) { - char *pos; + uchar *pos; char *ptr; uint len; @@ -341,7 +341,7 @@ static void print_record(char * record, my_off_t offs,const char * tail) #ifdef NOT_USED -static void create_point(char *record,uint rownr) +static void create_point(uchar *record,uint rownr) { uint tmp; char *ptr; @@ -368,11 +368,11 @@ static void create_point(char *record,uint rownr) #endif -static void create_linestring(char *record,uint rownr) +static void create_linestring(uchar *record,uint rownr) { uint tmp; char *ptr; - char *pos=record; + uchar *pos= record; double x[200]; int i,j; int npoints=2; @@ -396,21 +396,21 @@ static void create_linestring(char *record,uint rownr) } -static void create_key(char *key,uint rownr) +static void create_key(uchar *key,uint rownr) { double c=rownr; - char *pos; + uchar *pos; uint i; bzero(key,MAX_REC_LENGTH); - for ( pos=key, i=0; i<2*SPDIMS; i++) + for (pos=key, i=0; i<2*SPDIMS; i++) { float8store(pos,c); pos+=sizeof(c); } } -static void print_key(const char *key,const char * tail) +static void print_key(const uchar *key,const char * tail) { double c; uint i; diff --git a/storage/myisammrg/CMakeLists.txt b/storage/myisammrg/CMakeLists.txt index 8c8c8bcf9fb..848f2dfea43 100644..100755 --- a/storage/myisammrg/CMakeLists.txt +++ b/storage/myisammrg/CMakeLists.txt @@ -20,9 +20,14 @@ INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include ${CMAKE_SOURCE_DIR}/zlib ${CMAKE_SOURCE_DIR}/sql ${CMAKE_SOURCE_DIR}/regex ${CMAKE_SOURCE_DIR}/extra/yassl/include) -ADD_LIBRARY(myisammrg myrg_close.c myrg_create.c myrg_delete.c myrg_extra.c myrg_info.c + +SET(MYISAMMRG_SOURCES myrg_close.c myrg_create.c myrg_delete.c myrg_extra.c myrg_info.c ha_myisammrg.cc myrg_locking.c myrg_open.c myrg_panic.c myrg_queue.c myrg_range.c myrg_rfirst.c myrg_rkey.c myrg_rlast.c myrg_rnext.c myrg_rnext_same.c myrg_rprev.c myrg_rrnd.c myrg_rsame.c myrg_static.c myrg_update.c myrg_write.c) + +IF(NOT SOURCE_SUBLIBS) + ADD_LIBRARY(myisammrg ${MYISAMMRG_SOURCES}) +ENDIF(NOT SOURCE_SUBLIBS) diff --git a/storage/myisammrg/ha_myisammrg.cc b/storage/myisammrg/ha_myisammrg.cc index 7df81a4802f..8a914e8a2de 100644 --- a/storage/myisammrg/ha_myisammrg.cc +++ b/storage/myisammrg/ha_myisammrg.cc @@ -30,9 +30,6 @@ ** MyISAM MERGE tables *****************************************************************************/ -static handler *myisammrg_create_handler(TABLE_SHARE *table, - MEM_ROOT *mem_root); - static handler *myisammrg_create_handler(handlerton *hton, TABLE_SHARE *table, MEM_ROOT *mem_root) @@ -49,6 +46,30 @@ static const char *ha_myisammrg_exts[] = { ".MRG", NullS }; +extern int table2myisam(TABLE *table_arg, MI_KEYDEF **keydef_out, + MI_COLUMNDEF **recinfo_out, uint *records_out); +extern int check_definition(MI_KEYDEF *t1_keyinfo, MI_COLUMNDEF *t1_recinfo, + uint 
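/*
   How these externs are used: when a MERGE table is opened, the
   parent's TABLE object is converted to raw MyISAM key/column
   definitions, which are then compared against every attached child.
   A condensed sketch of the loop in ha_myisammrg::open() below
   (simplified; error paths and memory cleanup omitted):

     if (!table2myisam(table, &keyinfo, &recinfo, &recs))
     {
       for (u_table= file->open_tables; u_table < file->end_table;
            u_table++)
         if (check_definition(keyinfo, recinfo, keys, recs,
                              u_table->table->s->keyinfo,
                              u_table->table->s->rec,
                              u_table->table->s->base.keys,
                              u_table->table->s->base.fields, false))
           error= HA_ERR_WRONG_MRG_TABLE_DEF;
     }

   Under HA_OPEN_FOR_REPAIR a mismatching child is only reported via
   myrg_print_wrong_table(), so CHECK TABLE can list every bad child
   instead of failing on the first one.
*/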
t1_keys, uint t1_recs, + MI_KEYDEF *t2_keyinfo, MI_COLUMNDEF *t2_recinfo, + uint t2_keys, uint t2_recs, bool strict); +static void split_file_name(const char *file_name, + LEX_STRING *db, LEX_STRING *name); + + +extern "C" void myrg_print_wrong_table(const char *table_name) +{ + LEX_STRING db, name; + char buf[FN_REFLEN]; + split_file_name(table_name, &db, &name); + memcpy(buf, db.str, db.length); + buf[db.length]= '.'; + memcpy(buf + db.length + 1, name.str, name.length); + buf[db.length + name.length + 1]= 0; + push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR, + ER_ADMIN_WRONG_MRG_TABLE, ER(ER_ADMIN_WRONG_MRG_TABLE), + buf); +} + const char **ha_myisammrg::bas_ext() const { @@ -70,6 +91,12 @@ const char *ha_myisammrg::index_type(uint key_number) int ha_myisammrg::open(const char *name, int mode, uint test_if_locked) { + MI_KEYDEF *keyinfo; + MI_COLUMNDEF *recinfo; + MYRG_TABLE *u_table; + uint recs; + uint keys= table->s->keys; + int error; char name_buff[FN_REFLEN]; DBUG_PRINT("info", ("ha_myisammrg::open")); @@ -93,18 +120,52 @@ int ha_myisammrg::open(const char *name, int mode, uint test_if_locked) { DBUG_PRINT("error",("reclength: %lu mean_rec_length: %lu", table->s->reclength, stats.mean_rec_length)); + if (test_if_locked & HA_OPEN_FOR_REPAIR) + myrg_print_wrong_table(file->open_tables->table->filename); + error= HA_ERR_WRONG_MRG_TABLE_DEF; + goto err; + } + if ((error= table2myisam(table, &keyinfo, &recinfo, &recs))) + { + /* purecov: begin inspected */ + DBUG_PRINT("error", ("Failed to convert TABLE object to MyISAM " + "key and column definition")); goto err; + /* purecov: end */ + } + for (u_table= file->open_tables; u_table < file->end_table; u_table++) + { + if (check_definition(keyinfo, recinfo, keys, recs, + u_table->table->s->keyinfo, u_table->table->s->rec, + u_table->table->s->base.keys, + u_table->table->s->base.fields, false)) + { + error= HA_ERR_WRONG_MRG_TABLE_DEF; + if (test_if_locked & HA_OPEN_FOR_REPAIR) + myrg_print_wrong_table(u_table->table->filename); + else + { + my_free((uchar*) recinfo, MYF(0)); + goto err; + } + } } + my_free((uchar*) recinfo, MYF(0)); + if (error == HA_ERR_WRONG_MRG_TABLE_DEF) + goto err; #if !defined(BIG_TABLES) || SIZEOF_OFF_T == 4 /* Merge table has more than 2G rows */ if (table->s->crashed) + { + error= HA_ERR_WRONG_MRG_TABLE_DEF; goto err; + } #endif return (0); err: myrg_close(file); file=0; - return (my_errno= HA_ERR_WRONG_MRG_TABLE_DEF); + return (my_errno= error); } int ha_myisammrg::close(void) @@ -112,9 +173,9 @@ int ha_myisammrg::close(void) return myrg_close(file); } -int ha_myisammrg::write_row(byte * buf) +int ha_myisammrg::write_row(uchar * buf) { - statistic_increment(table->in_use->status_var.ha_write_count,&LOCK_status); + ha_statistic_increment(&SSV::ha_write_count); if (file->merge_insert_method == MERGE_INSERT_DISABLED || !file->tables) return (HA_ERR_TABLE_READONLY); @@ -130,92 +191,87 @@ int ha_myisammrg::write_row(byte * buf) return myrg_write(file,buf); } -int ha_myisammrg::update_row(const byte * old_data, byte * new_data) +int ha_myisammrg::update_row(const uchar * old_data, uchar * new_data) { - statistic_increment(table->in_use->status_var.ha_update_count,&LOCK_status); + ha_statistic_increment(&SSV::ha_update_count); if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE) table->timestamp_field->set_time(); return myrg_update(file,old_data,new_data); } -int ha_myisammrg::delete_row(const byte * buf) +int ha_myisammrg::delete_row(const uchar * buf) { - 
statistic_increment(table->in_use->status_var.ha_delete_count,&LOCK_status); + ha_statistic_increment(&SSV::ha_delete_count); return myrg_delete(file,buf); } -int ha_myisammrg::index_read(byte * buf, const byte * key, - uint key_len, enum ha_rkey_function find_flag) +int ha_myisammrg::index_read_map(uchar * buf, const uchar * key, + key_part_map keypart_map, + enum ha_rkey_function find_flag) { - statistic_increment(table->in_use->status_var.ha_read_key_count, - &LOCK_status); - int error=myrg_rkey(file,buf,active_index, key, key_len, find_flag); + ha_statistic_increment(&SSV::ha_read_key_count); + int error=myrg_rkey(file,buf,active_index, key, keypart_map, find_flag); table->status=error ? STATUS_NOT_FOUND: 0; return error; } -int ha_myisammrg::index_read_idx(byte * buf, uint index, const byte * key, - uint key_len, enum ha_rkey_function find_flag) +int ha_myisammrg::index_read_idx_map(uchar * buf, uint index, const uchar * key, + key_part_map keypart_map, + enum ha_rkey_function find_flag) { - statistic_increment(table->in_use->status_var.ha_read_key_count, - &LOCK_status); - int error=myrg_rkey(file,buf,index, key, key_len, find_flag); + ha_statistic_increment(&SSV::ha_read_key_count); + int error=myrg_rkey(file,buf,index, key, keypart_map, find_flag); table->status=error ? STATUS_NOT_FOUND: 0; return error; } -int ha_myisammrg::index_read_last(byte * buf, const byte * key, uint key_len) +int ha_myisammrg::index_read_last_map(uchar *buf, const uchar *key, + key_part_map keypart_map) { - statistic_increment(table->in_use->status_var.ha_read_key_count, - &LOCK_status); - int error=myrg_rkey(file,buf,active_index, key, key_len, + ha_statistic_increment(&SSV::ha_read_key_count); + int error=myrg_rkey(file,buf,active_index, key, keypart_map, HA_READ_PREFIX_LAST); table->status=error ? STATUS_NOT_FOUND: 0; return error; } -int ha_myisammrg::index_next(byte * buf) +int ha_myisammrg::index_next(uchar * buf) { - statistic_increment(table->in_use->status_var.ha_read_next_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_next_count); int error=myrg_rnext(file,buf,active_index); table->status=error ? STATUS_NOT_FOUND: 0; return error; } -int ha_myisammrg::index_prev(byte * buf) +int ha_myisammrg::index_prev(uchar * buf) { - statistic_increment(table->in_use->status_var.ha_read_prev_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_prev_count); int error=myrg_rprev(file,buf, active_index); table->status=error ? STATUS_NOT_FOUND: 0; return error; } -int ha_myisammrg::index_first(byte * buf) +int ha_myisammrg::index_first(uchar * buf) { - statistic_increment(table->in_use->status_var.ha_read_first_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_first_count); int error=myrg_rfirst(file, buf, active_index); table->status=error ? STATUS_NOT_FOUND: 0; return error; } -int ha_myisammrg::index_last(byte * buf) +int ha_myisammrg::index_last(uchar * buf) { - statistic_increment(table->in_use->status_var.ha_read_last_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_last_count); int error=myrg_rlast(file, buf, active_index); table->status=error ? 
STATUS_NOT_FOUND: 0; return error; } -int ha_myisammrg::index_next_same(byte * buf, - const byte *key __attribute__((unused)), +int ha_myisammrg::index_next_same(uchar * buf, + const uchar *key __attribute__((unused)), uint length __attribute__((unused))) { - statistic_increment(table->in_use->status_var.ha_read_next_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_next_count); int error=myrg_rnext_same(file,buf); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -228,29 +284,27 @@ int ha_myisammrg::rnd_init(bool scan) } -int ha_myisammrg::rnd_next(byte *buf) +int ha_myisammrg::rnd_next(uchar *buf) { - statistic_increment(table->in_use->status_var.ha_read_rnd_next_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_rnd_next_count); int error=myrg_rrnd(file, buf, HA_OFFSET_ERROR); table->status=error ? STATUS_NOT_FOUND: 0; return error; } -int ha_myisammrg::rnd_pos(byte * buf, byte *pos) +int ha_myisammrg::rnd_pos(uchar * buf, uchar *pos) { - statistic_increment(table->in_use->status_var.ha_read_rnd_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_rnd_count); int error=myrg_rrnd(file, buf, my_get_ptr(pos,ref_length)); table->status=error ? STATUS_NOT_FOUND: 0; return error; } -void ha_myisammrg::position(const byte *record) +void ha_myisammrg::position(const uchar *record) { - ulonglong position= myrg_position(file); - my_store_ptr(ref, ref_length, (my_off_t) position); + ulonglong row_position= myrg_position(file); + my_store_ptr(ref, ref_length, (my_off_t) row_position); } @@ -263,24 +317,23 @@ ha_rows ha_myisammrg::records_in_range(uint inx, key_range *min_key, int ha_myisammrg::info(uint flag) { - MYMERGE_INFO info; - (void) myrg_status(file,&info,flag); + MYMERGE_INFO mrg_info; + (void) myrg_status(file,&mrg_info,flag); /* The following fails if one has not compiled MySQL with -DBIG_TABLES and one has more than 2^32 rows in the merge tables. 
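   Rather than silently wrapping the 32-bit ha_rows counters, the
   code below flags the table as crashed when either count no longer
   fits. Sketch of the guard (the BIG_TABLES/SIZEOF_OFF_T
   preprocessor conditions are elided):

     if (mrg_info.records >= ((ulonglong) 1 << 32) ||
         mrg_info.deleted >= ((ulonglong) 1 << 32))
       table->s->crashed= 1;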
*/ - stats.records = (ha_rows) info.records; - stats.deleted = (ha_rows) info.deleted; + stats.records = (ha_rows) mrg_info.records; + stats.deleted = (ha_rows) mrg_info.deleted; #if !defined(BIG_TABLES) || SIZEOF_OFF_T == 4 - if ((info.records >= (ulonglong) 1 << 32) || - (info.deleted >= (ulonglong) 1 << 32)) + if ((mrg_info.records >= (ulonglong) 1 << 32) || + (mrg_info.deleted >= (ulonglong) 1 << 32)) table->s->crashed= 1; #endif - stats.data_file_length=info.data_file_length; - errkey = info.errkey; + stats.data_file_length= mrg_info.data_file_length; + errkey= mrg_info.errkey; table->s->keys_in_use.set_prefix(table->s->keys); - table->s->db_options_in_use= info.options; - stats.mean_rec_length= info.reclength; + stats.mean_rec_length= mrg_info.reclength; /* The handler::block_size is used all over the code in index scan cost @@ -310,7 +363,7 @@ int ha_myisammrg::info(uint flag) #endif if (flag & HA_STATUS_CONST) { - if (table->s->key_parts && info.rec_per_key) + if (table->s->key_parts && mrg_info.rec_per_key) { #ifdef HAVE_purify /* @@ -323,7 +376,7 @@ int ha_myisammrg::info(uint flag) sizeof(table->key_info[0].rec_per_key) * table->s->key_parts); #endif memcpy((char*) table->key_info[0].rec_per_key, - (char*) info.rec_per_key, + (char*) mrg_info.rec_per_key, sizeof(table->key_info[0].rec_per_key) * min(file->keys, table->s->key_parts)); } @@ -427,6 +480,7 @@ void ha_myisammrg::update_create_info(HA_CREATE_INFO *create_info) { TABLE_LIST *ptr; LEX_STRING db, name; + LINT_INIT(db.str); if (!(ptr = (TABLE_LIST *) thd->calloc(sizeof(TABLE_LIST)))) goto err; @@ -437,8 +491,8 @@ void ha_myisammrg::update_create_info(HA_CREATE_INFO *create_info) goto err; create_info->merge_list.elements++; - (*create_info->merge_list.next) = (byte*) ptr; - create_info->merge_list.next= (byte**) &ptr->next_local; + (*create_info->merge_list.next) = (uchar*) ptr; + create_info->merge_list.next= (uchar**) &ptr->next_local; } *create_info->merge_list.next=0; } @@ -537,6 +591,8 @@ void ha_myisammrg::append_create_info(String *packet) open_table++) { LEX_STRING db, name; + LINT_INIT(db.str); + split_file_name(open_table->table->filename, &db, &name); if (open_table != first) packet->append(','); @@ -564,6 +620,13 @@ bool ha_myisammrg::check_if_incompatible_data(HA_CREATE_INFO *info, return COMPATIBLE_DATA_NO; } + +int ha_myisammrg::check(THD* thd, HA_CHECK_OPT* check_opt) +{ + return HA_ADMIN_OK; +} + + extern int myrg_panic(enum ha_panic_function flag); int myisammrg_panic(handlerton *hton, ha_panic_function flag) { @@ -576,7 +639,6 @@ static int myisammrg_init(void *p) myisammrg_hton= (handlerton *)p; - myisammrg_hton->state= SHOW_OPTION_YES; myisammrg_hton->db_type= DB_TYPE_MRG_MYISAM; myisammrg_hton->create= myisammrg_create_handler; myisammrg_hton->panic= myisammrg_panic; diff --git a/storage/myisammrg/ha_myisammrg.h b/storage/myisammrg/ha_myisammrg.h index ffa55673ad1..91aabe277f7 100644 --- a/storage/myisammrg/ha_myisammrg.h +++ b/storage/myisammrg/ha_myisammrg.h @@ -35,9 +35,10 @@ class ha_myisammrg: public handler ulonglong table_flags() const { return (HA_REC_NOT_IN_SEQ | HA_AUTO_PART_KEY | HA_NO_TRANSACTIONS | + HA_BINLOG_ROW_CAPABLE | HA_BINLOG_STMT_CAPABLE | HA_NULL_IN_KEY | HA_CAN_INDEX_BLOBS | HA_FILE_BASED | - HA_CAN_INSERT_DELAYED | HA_ANY_INDEX_MAY_BE_UNIQUE | - HA_CAN_BIT_FIELD | HA_NO_COPY_ON_ALTER); + HA_ANY_INDEX_MAY_BE_UNIQUE | HA_CAN_BIT_FIELD | + HA_NO_COPY_ON_ALTER); } ulong index_flags(uint inx, uint part, bool all_parts) const { @@ -53,23 +54,24 @@ class ha_myisammrg: public handler 
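/*
   Context for the signature changes below: the index-read API moved
   from passing a raw key length to passing a key_part_map, a bitmap
   with one bit per key part present in the search key. An
   illustrative caller (a sketch only; make_prev_keypart_map() is the
   helper the server uses to build such maps):

     key_part_map map= make_prev_keypart_map(2);   // first two parts
     h->index_read_map(buf, key, map, HA_READ_KEY_EXACT);

   Inside myrg_rkey() the map is forwarded to mi_rkey() of the first
   child only; the resulting packed key is then reused for all other
   children.
*/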
int open(const char *name, int mode, uint test_if_locked); int close(void); - int write_row(byte * buf); - int update_row(const byte * old_data, byte * new_data); - int delete_row(const byte * buf); - int index_read(byte * buf, const byte * key, - uint key_len, enum ha_rkey_function find_flag); - int index_read_idx(byte * buf, uint idx, const byte * key, - uint key_len, enum ha_rkey_function find_flag); - int index_read_last(byte * buf, const byte * key, uint key_len); - int index_next(byte * buf); - int index_prev(byte * buf); - int index_first(byte * buf); - int index_last(byte * buf); - int index_next_same(byte *buf, const byte *key, uint keylen); + int write_row(uchar * buf); + int update_row(const uchar * old_data, uchar * new_data); + int delete_row(const uchar * buf); + int index_read_map(uchar *buf, const uchar *key, key_part_map keypart_map, + enum ha_rkey_function find_flag); + int index_read_idx_map(uchar *buf, uint index, const uchar *key, + key_part_map keypart_map, + enum ha_rkey_function find_flag); + int index_read_last_map(uchar *buf, const uchar *key, key_part_map keypart_map); + int index_next(uchar * buf); + int index_prev(uchar * buf); + int index_first(uchar * buf); + int index_last(uchar * buf); + int index_next_same(uchar *buf, const uchar *key, uint keylen); int rnd_init(bool scan); - int rnd_next(byte *buf); - int rnd_pos(byte * buf, byte *pos); - void position(const byte *record); + int rnd_next(uchar *buf); + int rnd_pos(uchar * buf, uchar *pos); + void position(const uchar *record); ha_rows records_in_range(uint inx, key_range *min_key, key_range *max_key); int info(uint); int reset(void); @@ -84,4 +86,5 @@ class ha_myisammrg: public handler void append_create_info(String *packet); MYRG_INFO *myrg_info() { return file; } bool check_if_incompatible_data(HA_CREATE_INFO *info, uint table_changes); + int check(THD* thd, HA_CHECK_OPT* check_opt); }; diff --git a/storage/myisammrg/myrg_close.c b/storage/myisammrg/myrg_close.c index 971a83928b1..baae24634b3 100644 --- a/storage/myisammrg/myrg_close.c +++ b/storage/myisammrg/myrg_close.c @@ -30,7 +30,7 @@ int myrg_close(MYRG_INFO *info) pthread_mutex_lock(&THR_LOCK_open); myrg_open_list=list_delete(myrg_open_list,&info->open_list); pthread_mutex_unlock(&THR_LOCK_open); - my_free((gptr) info,MYF(0)); + my_free((uchar*) info,MYF(0)); if (error) { DBUG_RETURN(my_errno=error); diff --git a/storage/myisammrg/myrg_create.c b/storage/myisammrg/myrg_create.c index c4e91e7b29b..df81b730bfd 100644 --- a/storage/myisammrg/myrg_create.c +++ b/storage/myisammrg/myrg_create.c @@ -46,7 +46,7 @@ int myrg_create(const char *name, const char **table_names, fn_same(buff,name,4); *(end=strend(buff))='\n'; end[1]=0; - if (my_write(file,buff,(uint) (end-buff+1), + if (my_write(file,(char*) buff,(uint) (end-buff+1), MYF(MY_WME | MY_NABP))) goto err; } @@ -55,7 +55,7 @@ int myrg_create(const char *name, const char **table_names, { end=strxmov(buff,"#INSERT_METHOD=", get_type(&merge_insert_method,insert_method-1),"\n",NullS); - if (my_write(file,buff,(uint) (end-buff),MYF(MY_WME | MY_NABP))) + if (my_write(file, (uchar*) buff,(uint) (end-buff),MYF(MY_WME | MY_NABP))) goto err; } if (my_close(file,MYF(0))) diff --git a/storage/myisammrg/myrg_def.h b/storage/myisammrg/myrg_def.h index 344bd4edd3c..9c69da1424d 100644 --- a/storage/myisammrg/myrg_def.h +++ b/storage/myisammrg/myrg_def.h @@ -28,5 +28,8 @@ extern pthread_mutex_t THR_LOCK_open; #endif int _myrg_init_queue(MYRG_INFO *info,int inx,enum ha_rkey_function search_flag); -int 
_myrg_mi_read_record(MI_INFO *info, byte *buf); - +int _myrg_mi_read_record(MI_INFO *info, uchar *buf); +#ifdef __cplusplus +extern "C" +#endif +void myrg_print_wrong_table(const char *table_name); diff --git a/storage/myisammrg/myrg_delete.c b/storage/myisammrg/myrg_delete.c index f9604f66885..93d45198b36 100644 --- a/storage/myisammrg/myrg_delete.c +++ b/storage/myisammrg/myrg_delete.c @@ -17,7 +17,7 @@ #include "myrg_def.h" -int myrg_delete(MYRG_INFO *info, const byte *record) +int myrg_delete(MYRG_INFO *info, const uchar *record) { if (!info->current_table) return (my_errno= HA_ERR_NO_ACTIVE_RECORD); diff --git a/storage/myisammrg/myrg_locking.c b/storage/myisammrg/myrg_locking.c index a07833bc829..4f1e3f844a1 100644 --- a/storage/myisammrg/myrg_locking.c +++ b/storage/myisammrg/myrg_locking.c @@ -37,7 +37,15 @@ int myrg_lock_database(MYRG_INFO *info, int lock_type) (file->table)->owned_by_merge = TRUE; #endif if ((new_error=mi_lock_database(file->table,lock_type))) + { error=new_error; + if (lock_type != F_UNLCK) + { + while (--file >= info->open_tables) + mi_lock_database(file->table, F_UNLCK); + break; + } + } } return(error); } diff --git a/storage/myisammrg/myrg_open.c b/storage/myisammrg/myrg_open.c index 3dbb605463e..500d3a29327 100644 --- a/storage/myisammrg/myrg_open.c +++ b/storage/myisammrg/myrg_open.c @@ -40,6 +40,7 @@ MYRG_INFO *myrg_open(const char *name, int mode, int handle_locking) IO_CACHE file; MI_INFO *isam=0; uint found_merge_insert_method= 0; + size_t name_buff_length; DBUG_ENTER("myrg_open"); LINT_INIT(key_parts); @@ -48,13 +49,13 @@ MYRG_INFO *myrg_open(const char *name, int mode, int handle_locking) if ((fd=my_open(fn_format(name_buff,name,"",MYRG_NAME_EXT, MY_UNPACK_FILENAME|MY_APPEND_EXT), O_RDONLY | O_SHARE,MYF(0))) < 0) - goto err; - errpos=1; - if (init_io_cache(&file, fd, 4*IO_SIZE, READ_CACHE, 0, 0, + goto err; + errpos=1; + if (init_io_cache(&file, fd, 4*IO_SIZE, READ_CACHE, 0, 0, MYF(MY_WME | MY_NABP))) - goto err; - errpos=2; - dir_length=dirname_part(name_buff,name); + goto err; + errpos=2; + dir_length=dirname_part(name_buff, name, &name_buff_length); while ((length=my_b_gets(&file,buff,FN_REFLEN-1))) { if ((end=buff+length)[-1] == '\n') @@ -91,6 +92,11 @@ MYRG_INFO *myrg_open(const char *name, int mode, int handle_locking) if (!(isam=mi_open(buff,mode,(handle_locking?HA_OPEN_WAIT_IF_LOCKED:0)))) { my_errno= HA_ERR_WRONG_MRG_TABLE_DEF; + if (handle_locking & HA_OPEN_FOR_REPAIR) + { + myrg_print_wrong_table(buff); + continue; + } goto err; } if (!m_info) /* First file */ @@ -119,6 +125,11 @@ MYRG_INFO *myrg_open(const char *name, int mode, int handle_locking) if (m_info->reclength != isam->s->base.reclength) { my_errno=HA_ERR_WRONG_MRG_TABLE_DEF; + if (handle_locking & HA_OPEN_FOR_REPAIR) + { + myrg_print_wrong_table(buff); + continue; + } goto err; } m_info->options|= isam->s->options; @@ -132,6 +143,8 @@ MYRG_INFO *myrg_open(const char *name, int mode, int handle_locking) m_info->tables); } + if (my_errno == HA_ERR_WRONG_MRG_TABLE_DEF) + goto err; if (!m_info && !(m_info= (MYRG_INFO*) my_malloc(sizeof(MYRG_INFO), MYF(MY_WME | MY_ZEROFILL)))) goto err; diff --git a/storage/myisammrg/myrg_queue.c b/storage/myisammrg/myrg_queue.c index 1d252207db1..d2579053784 100644 --- a/storage/myisammrg/myrg_queue.c +++ b/storage/myisammrg/myrg_queue.c @@ -15,7 +15,7 @@ #include "myrg_def.h" -static int queue_key_cmp(void *keyseg, byte *a, byte *b) +static int queue_key_cmp(void *keyseg, uchar *a, uchar *b) { MYRG_TABLE *ma= (MYRG_TABLE *)a; MYRG_TABLE *mb= 
(MYRG_TABLE *)b; @@ -69,7 +69,7 @@ int _myrg_init_queue(MYRG_INFO *info,int inx,enum ha_rkey_function search_flag) return error; } -int _myrg_mi_read_record(MI_INFO *info, byte *buf) +int _myrg_mi_read_record(MI_INFO *info, uchar *buf) { if (!(*info->read_record)(info,info->lastpos,buf)) { diff --git a/storage/myisammrg/myrg_rfirst.c b/storage/myisammrg/myrg_rfirst.c index 80736537d02..9d7b0f9e83f 100644 --- a/storage/myisammrg/myrg_rfirst.c +++ b/storage/myisammrg/myrg_rfirst.c @@ -17,7 +17,7 @@ /* Read first row according to specific key */ -int myrg_rfirst(MYRG_INFO *info, byte *buf, int inx) +int myrg_rfirst(MYRG_INFO *info, uchar *buf, int inx) { MYRG_TABLE *table; MI_INFO *mi; @@ -35,7 +35,7 @@ int myrg_rfirst(MYRG_INFO *info, byte *buf, int inx) return err; } /* adding to queue */ - queue_insert(&(info->by_key),(byte *)table); + queue_insert(&(info->by_key),(uchar *)table); } /* We have done a read in all tables */ info->last_used_table=table; diff --git a/storage/myisammrg/myrg_rkey.c b/storage/myisammrg/myrg_rkey.c index f7b7f082019..8e7886f5a43 100644 --- a/storage/myisammrg/myrg_rkey.c +++ b/storage/myisammrg/myrg_rkey.c @@ -35,17 +35,19 @@ SerG */ -int myrg_rkey(MYRG_INFO *info,byte *buf,int inx, const byte *key, - uint key_len, enum ha_rkey_function search_flag) +int myrg_rkey(MYRG_INFO *info,uchar *buf,int inx, const uchar *key, + key_part_map keypart_map, enum ha_rkey_function search_flag) { - byte *key_buff; + uchar *key_buff; uint pack_key_length; + uint16 last_used_keyseg; MYRG_TABLE *table; MI_INFO *mi; int err; DBUG_ENTER("myrg_rkey"); LINT_INIT(key_buff); LINT_INIT(pack_key_length); + LINT_INIT(last_used_keyseg); if (_myrg_init_queue(info,inx,search_flag)) DBUG_RETURN(my_errno); @@ -56,15 +58,17 @@ int myrg_rkey(MYRG_INFO *info,byte *buf,int inx, const byte *key, if (table == info->open_tables) { - err=mi_rkey(mi,0,inx,key,key_len,search_flag); + err=mi_rkey(mi, 0, inx, key, keypart_map, search_flag); /* Get the saved packed key and packed key length. */ - key_buff=(byte*) mi->lastkey+mi->s->base.max_key_length; + key_buff=(uchar*) mi->lastkey+mi->s->base.max_key_length; pack_key_length=mi->pack_key_length; + last_used_keyseg= mi->last_used_keyseg; } else { mi->once_flags|= USE_PACKED_KEYS; - err=mi_rkey(mi,0,inx,key_buff,pack_key_length,search_flag); + mi->last_used_keyseg= last_used_keyseg; + err=mi_rkey(mi, 0, inx, key_buff, pack_key_length, search_flag); } info->last_used_table=table+1; @@ -76,7 +80,7 @@ int myrg_rkey(MYRG_INFO *info,byte *buf,int inx, const byte *key, DBUG_RETURN(err); } /* adding to queue */ - queue_insert(&(info->by_key),(byte *)table); + queue_insert(&(info->by_key),(uchar *)table); } @@ -88,6 +92,6 @@ int myrg_rkey(MYRG_INFO *info,byte *buf,int inx, const byte *key, mi->once_flags|= RRND_PRESERVE_LASTINX; DBUG_PRINT("info", ("using table no: %d", (int) (info->current_table - info->open_tables + 1))); - DBUG_DUMP("result key", (byte*) mi->lastkey, mi->lastkey_length); + DBUG_DUMP("result key", (uchar*) mi->lastkey, mi->lastkey_length); DBUG_RETURN(_myrg_mi_read_record(mi,buf)); } diff --git a/storage/myisammrg/myrg_rlast.c b/storage/myisammrg/myrg_rlast.c index f364bf9b32f..8086a2f8104 100644 --- a/storage/myisammrg/myrg_rlast.c +++ b/storage/myisammrg/myrg_rlast.c @@ -17,7 +17,7 @@ /* Read last row with the same key as the previous read. 
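   Like the other ordered reads in this engine, this is a k-way
   merge: each child MyISAM table is positioned on its own candidate
   row and pushed into the by_key priority queue, whose top is the
   table that sorts first for the current scan direction. The shared
   pattern, roughly (simplified; children at EOF simply stay out of
   the queue):

     for (table= info->open_tables; table < info->end_table; table++)
     {
       if ((err= mi_rlast(table->table, NULL, inx)))
       {
         if (err == HA_ERR_END_OF_FILE)
           continue;
         return err;
       }
       queue_insert(&info->by_key, (uchar*) table);
     }

   myrg_rnext()/myrg_rprev() then advance only the top table and call
   queue_replaced() to sift it back into position.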
*/ -int myrg_rlast(MYRG_INFO *info, byte *buf, int inx) +int myrg_rlast(MYRG_INFO *info, uchar *buf, int inx) { MYRG_TABLE *table; MI_INFO *mi; @@ -35,7 +35,7 @@ int myrg_rlast(MYRG_INFO *info, byte *buf, int inx) return err; } /* adding to queue */ - queue_insert(&(info->by_key),(byte *)table); + queue_insert(&(info->by_key),(uchar *)table); } /* We have done a read in all tables */ info->last_used_table=table; diff --git a/storage/myisammrg/myrg_rnext.c b/storage/myisammrg/myrg_rnext.c index de1aa4df4b6..82d5cbf38b1 100644 --- a/storage/myisammrg/myrg_rnext.c +++ b/storage/myisammrg/myrg_rnext.c @@ -19,7 +19,7 @@ Read next row with the same key as previous read */ -int myrg_rnext(MYRG_INFO *info, byte *buf, int inx) +int myrg_rnext(MYRG_INFO *info, uchar *buf, int inx) { int err; MI_INFO *mi; @@ -42,7 +42,7 @@ int myrg_rnext(MYRG_INFO *info, byte *buf, int inx) else { /* Found here, adding to queue */ - queue_top(&(info->by_key))=(byte *)(info->current_table); + queue_top(&(info->by_key))=(uchar *)(info->current_table); queue_replaced(&(info->by_key)); } diff --git a/storage/myisammrg/myrg_rnext_same.c b/storage/myisammrg/myrg_rnext_same.c index 9c6b522ee8a..ad7bbfb0f6e 100644 --- a/storage/myisammrg/myrg_rnext_same.c +++ b/storage/myisammrg/myrg_rnext_same.c @@ -16,7 +16,7 @@ #include "myrg_def.h" -int myrg_rnext_same(MYRG_INFO *info, byte *buf) +int myrg_rnext_same(MYRG_INFO *info, uchar *buf) { int err; MI_INFO *mi; @@ -39,7 +39,7 @@ int myrg_rnext_same(MYRG_INFO *info, byte *buf) else { /* Found here, adding to queue */ - queue_top(&(info->by_key))=(byte *)(info->current_table); + queue_top(&(info->by_key))=(uchar *)(info->current_table); queue_replaced(&(info->by_key)); } diff --git a/storage/myisammrg/myrg_rprev.c b/storage/myisammrg/myrg_rprev.c index b1b86a93fad..66c94974940 100644 --- a/storage/myisammrg/myrg_rprev.c +++ b/storage/myisammrg/myrg_rprev.c @@ -19,7 +19,7 @@ Read previous row with the same key as previous read */ -int myrg_rprev(MYRG_INFO *info, byte *buf, int inx) +int myrg_rprev(MYRG_INFO *info, uchar *buf, int inx) { int err; MI_INFO *mi; @@ -42,7 +42,7 @@ int myrg_rprev(MYRG_INFO *info, byte *buf, int inx) else { /* Found here, adding to queue */ - queue_top(&(info->by_key))=(byte *)(info->current_table); + queue_top(&(info->by_key))=(uchar *)(info->current_table); queue_replaced(&(info->by_key)); } diff --git a/storage/myisammrg/myrg_rrnd.c b/storage/myisammrg/myrg_rrnd.c index 55e72b2170d..b598563680c 100644 --- a/storage/myisammrg/myrg_rrnd.c +++ b/storage/myisammrg/myrg_rrnd.c @@ -30,7 +30,7 @@ static MYRG_TABLE *find_table(MYRG_TABLE *start,MYRG_TABLE *end,ulonglong pos); HA_ERR_END_OF_FILE = EOF. 
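   Positions returned by a MERGE scan form one virtual address space:
   each child records a file_offset equal to the end of the previous
   child's data, so the owning child for a saved position is the last
   one whose file_offset is <= filepos, and the local offset is the
   difference. Roughly (a sketch; the real lookup, find_table(), does
   a binary search over the same invariant):

     for (t= info->end_table - 1; t > info->open_tables; t--)
       if (t->file_offset <= filepos)
         break;
     local_pos= (my_off_t) (filepos - t->file_offset);
     error= (*t->table->s->read_rnd)(t->table, buf, local_pos, 0);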
*/ -int myrg_rrnd(MYRG_INFO *info,byte *buf,ulonglong filepos) +int myrg_rrnd(MYRG_INFO *info,uchar *buf,ulonglong filepos) { int error; MI_INFO *isam_info; @@ -47,7 +47,7 @@ } isam_info=(info->current_table=info->open_tables)->table; if (info->cache_in_use) - mi_extra(isam_info,HA_EXTRA_CACHE,(byte*) &info->cache_size); + mi_extra(isam_info,HA_EXTRA_CACHE,(uchar*) &info->cache_size); filepos=isam_info->s->pack.header_length; isam_info->lastinx= (uint) -1; /* Can't forward or backward */ } @@ -60,20 +60,20 @@ for (;;) { isam_info->update&= HA_STATE_CHANGED; - if ((error=(*isam_info->s->read_rnd)(isam_info,(byte*) buf, + if ((error=(*isam_info->s->read_rnd)(isam_info,(uchar*) buf, (my_off_t) filepos,1)) != HA_ERR_END_OF_FILE) DBUG_RETURN(error); if (info->cache_in_use) mi_extra(info->current_table->table, HA_EXTRA_NO_CACHE, - (byte*) &info->cache_size); + (uchar*) &info->cache_size); if (info->current_table+1 == info->end_table) DBUG_RETURN(HA_ERR_END_OF_FILE); info->current_table++; info->last_used_table=info->current_table; if (info->cache_in_use) mi_extra(info->current_table->table, HA_EXTRA_CACHE, - (byte*) &info->cache_size); + (uchar*) &info->cache_size); info->current_table->file_offset= info->current_table[-1].file_offset+ info->current_table[-1].table->state->data_file_length; @@ -88,7 +88,7 @@ isam_info=info->current_table->table; isam_info->update&= HA_STATE_CHANGED; DBUG_RETURN((*isam_info->s->read_rnd) - (isam_info, (byte*) buf, + (isam_info, (uchar*) buf, (my_off_t) (filepos - info->current_table->file_offset), 0)); } diff --git a/storage/myisammrg/myrg_rsame.c b/storage/myisammrg/myrg_rsame.c index 56b16c0aa3c..2f7523759dc 100644 --- a/storage/myisammrg/myrg_rsame.c +++ b/storage/myisammrg/myrg_rsame.c @@ -15,7 +15,7 @@ #include "myrg_def.h" -int myrg_rsame(MYRG_INFO *info,byte *record,int inx) +int myrg_rsame(MYRG_INFO *info,uchar *record,int inx) { if (inx) /* not yet used, should be 0 */ return (my_errno=HA_ERR_WRONG_INDEX); diff --git a/storage/myisammrg/myrg_update.c b/storage/myisammrg/myrg_update.c index ba667d69f12..5d883be8484 100644 --- a/storage/myisammrg/myrg_update.c +++ b/storage/myisammrg/myrg_update.c @@ -17,7 +17,7 @@ #include "myrg_def.h" -int myrg_update(register MYRG_INFO *info,const byte *oldrec, byte *newrec) +int myrg_update(register MYRG_INFO *info,const uchar *oldrec, uchar *newrec) { if (!info->current_table) return (my_errno=HA_ERR_NO_ACTIVE_RECORD); diff --git a/storage/myisammrg/myrg_write.c b/storage/myisammrg/myrg_write.c index ed0a4a7996a..27534df2821 100644 --- a/storage/myisammrg/myrg_write.c +++ b/storage/myisammrg/myrg_write.c @@ -17,7 +17,7 @@ #include "myrg_def.h" -int myrg_write(register MYRG_INFO *info, byte *rec) +int myrg_write(register MYRG_INFO *info, uchar *rec) { /* [phi] MERGE_WRITE_DISABLED is handled by the else case */ if (info->merge_insert_method == MERGE_INSERT_TO_FIRST) diff --git a/storage/ndb/MAINTAINERS b/storage/ndb/MAINTAINERS new file mode 100644 index 00000000000..d1547d48234 --- /dev/null +++ b/storage/ndb/MAINTAINERS @@ -0,0 +1,153 @@ +MySQL Cluster MAINTAINERS +------------------------- + +This is a list of knowledgeable people in parts of the NDB code. + +When changing an area of code, you probably want to ask the +people who know a lot about it to look over the patch. + +When sending patches and queries, always CC the mailing list.
+ +If no list is specified, assume internals@lists.mysql.com + +P: Person +M: Mail +L: Mailing list +W: Web page with status/info +C: Comment +SRC: Source directory (relative to this directory) +T: SCM tree type and location +S: Status, one of: + + Supported: Somebody is paid to maintain this. + Maintained: Not their primary job, but maintained. + Orphan: No current obvious maintainer. + Obsolete: Replaced by something else. + +------------------------------------------------------------- + +Binlog Injector +SRC: ha_ndbcluster_binlog.cc +C: see also row based replication +P: Stewart Smith +M: stewart@mysql.com +C: Original author +P: Tomas Ulin +M: tomas@mysql.com +C: Lots of updates +P: Martin Skold +M: martin@mysql.com +C: Metadata ops +S: Supported + +BLOBs +SRC: ha_ndbcluster.cc +SRC: src/ndbapi/NdbBlob* +P: Pekka +M: pekka@mysql.com +S: Supported + +cpcd/cpcc +SRC: src/cw/cpcd +SRC: src/cw/cpcc +C: Maintained only as part of autotest +P: Jonas Oreland +M: jonas@mysql.com +S: Maintained + +cpcc-win32 +SRC: src/cw/cpcc-win32 +S: Obsolete + +Handler +SRC: ha_ndbcluster.cc +P: Martin Skold +M: martin@mysql.com +S: Supported + +Management Server +SRC: src/mgmsrv/ +P: Stewart Smith +M: stewart@mysql.com +S: Supported + +Management Client +SRC: src/mgmclient/ +P: Stewart Smith +M: stewart@mysql.com +S: Supported + +Management API +SRC: src/mgmapi/ +P: Stewart Smith +M: stewart@mysql.com +S: Supported + +NDB API Examples +SRC: ndbapi-examples/ +P: Tomas Ulin +M: tomas@mysql.com +C: Originally by Lars +P: Lars Thalmann +M: lars@mysql.com +S: Maintained + +NDB API NdbRecord Examples +SRC: ndbapi-examples/ +P: Kristian Nielsen +M: knielsen@mysql.com +S: Maintained + +tsman +C: Disk Data (Table Space MANager) +SRC: src/kernel/blocks/tsman.cpp +SRC: src/kernel/blocks/tsman.hpp +P: Jonas Oreland +M: jonas@mysql.com +S: Supported + +lgman +C: Disk Data (LoG MANager) +SRC: src/kernel/blocks/lgman.cpp +SRC: src/kernel/blocks/lgman.hpp +P: Jonas Oreland +M: jonas@mysql.com +S: Supported + +pgman +C: Disk Data (PaGe MANager) +SRC: src/kernel/blocks/pgman.cpp +SRC: src/kernel/blocks/pgman.hpp +P: Jonas Oreland +M: jonas@mysql.com +S: Supported + +SUMA +C: SUbscription MAnager +C: Used for replication +SRC: src/kernel/blocks/suma/ +P: Tomas Ulin +M: tomas@mysql.com +P: Jonas Oreland +M: jonas@mysql.com +S: Supported + +TRIX +C: TRiggers and IndeXes (but only online Index build) +SRC: src/kernel/blocks/trix +P: Martin Skold +M: mskold@mysql.com +S: Supported + +QMGR +C: Cluster (with a Q) ManaGeR +C: Heartbeats etc +SRC: src/kernel/blocks/qmgr +S: Supported + +NDBFS +C: NDB FileSystem +C: File System abstraction +SRC: src/kernel/blocks/ndbfs +S: Supported + diff --git a/storage/ndb/config/common.mk.am b/storage/ndb/config/common.mk.am index 5ed3855f31e..9633a52e91f 100644 --- a/storage/ndb/config/common.mk.am +++ b/storage/ndb/config/common.mk.am @@ -25,3 +25,5 @@ INCLUDES = $(INCLUDES_LOC) LDADD = $(LDADD_LOC) DEFS = @DEFS@ @NDB_DEFS@ $(DEFS_LOC) $(NDB_EXTRA_FLAGS) NDB_CXXFLAGS=@ndb_cxxflags_fix@ $(NDB_CXXFLAGS_LOC) +NDB_AM_CXXFLAGS:= $(AM_CXXFLAGS) +AM_CXXFLAGS=$(NDB_AM_CXXFLAGS) $(NDB_CXXFLAGS) diff --git a/storage/ndb/include/Makefile.am b/storage/ndb/include/Makefile.am index bf8fe392072..9e6ad016d75 100644 --- a/storage/ndb/include/Makefile.am +++ b/storage/ndb/include/Makefile.am @@ -45,6 +45,7 @@
ndbapi/ndberror.h mgmapiinclude_HEADERS = \ mgmapi/mgmapi.h \ +mgmapi/mgmapi_error.h \ mgmapi/mgmapi_debug.h \ mgmapi/mgmapi_config_parameters.h \ mgmapi/mgmapi_config_parameters_debug.h \ diff --git a/storage/ndb/include/debugger/EventLogger.hpp b/storage/ndb/include/debugger/EventLogger.hpp index 7e47dbf59db..8ae96162a48 100644 --- a/storage/ndb/include/debugger/EventLogger.hpp +++ b/storage/ndb/include/debugger/EventLogger.hpp @@ -173,5 +173,5 @@ private: STATIC_CONST(MAX_TEXT_LENGTH = 256); }; - +extern void getRestartAction(Uint32 action, BaseString &str); #endif diff --git a/storage/ndb/include/kernel/AttributeHeader.hpp b/storage/ndb/include/kernel/AttributeHeader.hpp index a8b811e4efa..613e3d19d1b 100644 --- a/storage/ndb/include/kernel/AttributeHeader.hpp +++ b/storage/ndb/include/kernel/AttributeHeader.hpp @@ -45,7 +45,8 @@ public: STATIC_CONST( ROWID = 0xFFF6 ); STATIC_CONST( ROW_GCI = 0xFFF5 ); STATIC_CONST( FRAGMENT_VARSIZED_MEMORY = 0xFFF4 ); - + // 0xFFF3 to be used for read packed when merged + STATIC_CONST( ANY_VALUE = 0xFFF2 ); STATIC_CONST( COPY_ROWID = 0xFFF1 ); // NOTE: in 5.1 ctors and init take size in bytes diff --git a/storage/ndb/include/kernel/GlobalSignalNumbers.h b/storage/ndb/include/kernel/GlobalSignalNumbers.h index fcbdedc44cc..aa0596f102a 100644 --- a/storage/ndb/include/kernel/GlobalSignalNumbers.h +++ b/storage/ndb/include/kernel/GlobalSignalNumbers.h @@ -551,13 +551,13 @@ extern const GlobalSignalNumber NO_OF_SIGNAL_NAMES; #define GSN_ABORT_ALL_REF 446 #define GSN_ABORT_ALL_CONF 447 -#define GSN_STATISTICS_REQ 448 +/* 448 unused - formerly GSN_STATISTICS_REQ */ #define GSN_STOP_ORD 449 #define GSN_TAMPER_ORD 450 -#define GSN_SET_VAR_REQ 451 -#define GSN_SET_VAR_CONF 452 -#define GSN_SET_VAR_REF 453 -#define GSN_STATISTICS_CONF 454 +/* 451 unused - formerly GSN_SET_VAR_REQ */ +/* 452 unused - formerly GSN_SET_VAR_CONF */ +/* 453 unused - formerly GSN_SET_VAR_REF */ +/* 454 unused - formerly GSN_STATISTICS_CONF */ #define GSN_START_ORD 455 /* 457 unused */ diff --git a/storage/ndb/include/kernel/signaldata/ArbitSignalData.hpp b/storage/ndb/include/kernel/signaldata/ArbitSignalData.hpp index d3dd070a62e..ed7e3929414 100644 --- a/storage/ndb/include/kernel/signaldata/ArbitSignalData.hpp +++ b/storage/ndb/include/kernel/signaldata/ArbitSignalData.hpp @@ -31,11 +31,10 @@ private: Uint32 data[2]; public: + ArbitTicket() {} STATIC_CONST( DataLength = 2 ); STATIC_CONST( TextLength = DataLength * 8 ); // hex digits - ArbitTicket() {} - inline void clear() { data[0] = 0; data[1] = 0; @@ -144,9 +143,9 @@ public: ArbitTicket ticket; // ticket NodeBitmask mask; // set of nodes + ArbitSignalData() {} STATIC_CONST( SignalLength = 3 + ArbitTicket::DataLength + NodeBitmask::Size ); - ArbitSignalData() {} inline bool match(ArbitSignalData& aData) const { return node == aData.node && diff --git a/storage/ndb/include/kernel/signaldata/CreateFilegroup.hpp b/storage/ndb/include/kernel/signaldata/CreateFilegroup.hpp index d40f3f7d8cb..fa92af1de8c 100644 --- a/storage/ndb/include/kernel/signaldata/CreateFilegroup.hpp +++ b/storage/ndb/include/kernel/signaldata/CreateFilegroup.hpp @@ -64,9 +64,10 @@ struct CreateFilegroupRef { InvalidFormat = 740, OutOfFilegroupRecords = 765, InvalidExtentSize = 764, - InvalidUndoBufferSize = 763, + InvalidUndoBufferSize = 779, NoSuchLogfileGroup = 767, - InvalidFilegroupVersion = 768 + InvalidFilegroupVersion = 768, + SingleUser = 299 }; Uint32 senderData; @@ -159,7 +160,8 @@ struct CreateFileRef { FilenameAlreadyExists = 760, OutOfFileRecords 
= 751, InvalidFileType = 750, - NotSupportedWhenDiskless = 775 + NotSupportedWhenDiskless = 775, + SingleUser = 299 }; Uint32 senderData; diff --git a/storage/ndb/include/kernel/signaldata/DictTabInfo.hpp b/storage/ndb/include/kernel/signaldata/DictTabInfo.hpp index bb35a31321d..3923d8e6fbf 100644 --- a/storage/ndb/include/kernel/signaldata/DictTabInfo.hpp +++ b/storage/ndb/include/kernel/signaldata/DictTabInfo.hpp @@ -141,6 +141,8 @@ public: RowGCIFlag = 150, RowChecksumFlag = 151, + SingleUserMode = 152, + TableEnd = 999, AttributeName = 1000, // String, Mandatory @@ -346,6 +348,8 @@ public: Uint32 RowGCIFlag; Uint32 RowChecksumFlag; + + Uint32 SingleUserMode; Table() {} void init(); diff --git a/storage/ndb/include/kernel/signaldata/DropFilegroup.hpp b/storage/ndb/include/kernel/signaldata/DropFilegroup.hpp index 0125e523653..a243380246e 100644 --- a/storage/ndb/include/kernel/signaldata/DropFilegroup.hpp +++ b/storage/ndb/include/kernel/signaldata/DropFilegroup.hpp @@ -66,7 +66,8 @@ struct DropFilegroupRef { NotMaster = 702, NoSuchFilegroup = 767, FilegroupInUse = 768, - InvalidSchemaObjectVersion = 774 + InvalidSchemaObjectVersion = 774, + SingleUser = 299 }; Uint32 senderData; @@ -152,7 +153,8 @@ struct DropFileRef { NotMaster = 702, NoSuchFile = 766, DropUndoFileNotSupported = 769, - InvalidSchemaObjectVersion = 774 + InvalidSchemaObjectVersion = 774, + SingleUser = 299 }; Uint32 senderData; diff --git a/storage/ndb/include/kernel/signaldata/Extent.hpp b/storage/ndb/include/kernel/signaldata/Extent.hpp index 88f2e394233..283ea7ba85a 100644 --- a/storage/ndb/include/kernel/signaldata/Extent.hpp +++ b/storage/ndb/include/kernel/signaldata/Extent.hpp @@ -31,7 +31,8 @@ struct AllocExtentReq { enum ErrorCode { UnmappedExtentPageIsNotImplemented = 1, - NoExtentAvailable = 1601 + NoExtentAvailable = 1601, + NoDatafile = 1602 }; union diff --git a/storage/ndb/include/kernel/signaldata/FireTrigOrd.hpp b/storage/ndb/include/kernel/signaldata/FireTrigOrd.hpp index 52e52e73c36..d33f8be3650 100644 --- a/storage/ndb/include/kernel/signaldata/FireTrigOrd.hpp +++ b/storage/ndb/include/kernel/signaldata/FireTrigOrd.hpp @@ -53,7 +53,7 @@ class FireTrigOrd { public: STATIC_CONST( SignalLength = 8 ); STATIC_CONST( SignalWithGCILength = 9 ); - STATIC_CONST( SignalWithHashValueLength = 10 ); + STATIC_CONST( SignalLengthSuma = 11 ); private: Uint32 m_connectionPtr; @@ -66,6 +66,7 @@ private: Uint32 fragId; Uint32 m_gci; Uint32 m_hashValue; + Uint32 m_any_value; // Public methods public: Uint32 getConnectionPtr() const; @@ -86,6 +87,8 @@ public: void setGCI(Uint32); Uint32 getHashValue() const; void setHashValue(Uint32); + Uint32 getAnyValue() const; + void setAnyValue(Uint32); }; inline @@ -196,5 +199,17 @@ void FireTrigOrd::setHashValue(Uint32 flag) m_hashValue = flag; } +inline +Uint32 FireTrigOrd::getAnyValue() const +{ + return m_any_value; +} + +inline +void FireTrigOrd::setAnyValue(Uint32 any_value) +{ + m_any_value = any_value; +} + #endif diff --git a/storage/ndb/include/kernel/signaldata/ScanTab.hpp b/storage/ndb/include/kernel/signaldata/ScanTab.hpp index 571fc374eab..3d2071ca019 100644 --- a/storage/ndb/include/kernel/signaldata/ScanTab.hpp +++ b/storage/ndb/include/kernel/signaldata/ScanTab.hpp @@ -46,6 +46,7 @@ public: * Length of signal */ STATIC_CONST( StaticLength = 11 ); + STATIC_CONST( MaxTotalAttrInfo = 0xFFFF ); private: @@ -115,16 +116,16 @@ private: z = Descending (TUX) - 1 Bit 14 x = Range Scan (TUX) - 1 Bit 15 b = Scan batch - 10 Bit 16-25 (max 1023) - d = Distribution key flag - n 
= No disk flag + d = Distribution key flag - 1 Bit 26 + n = No disk flag - 1 Bit 9 1111111111222222222233 01234567890123456789012345678901 - pppppppplnhcktzxbbbbbbbbbb + pppppppplnhcktzxbbbbbbbbbbd */ -#define PARALLELL_SHIFT (0) -#define PARALLELL_MASK (255) +#define PARALLEL_SHIFT (0) +#define PARALLEL_MASK (255) #define LOCK_MODE_SHIFT (8) #define LOCK_MODE_MASK (1) @@ -151,13 +152,15 @@ private: #define SCAN_BATCH_MASK (1023) #define SCAN_DISTR_KEY_SHIFT (26) +#define SCAN_DISTR_KEY_MASK (1) #define SCAN_NODISK_SHIFT (9) +#define SCAN_NODISK_MASK (1) inline Uint8 ScanTabReq::getParallelism(const UintR & requestInfo){ - return (Uint8)((requestInfo >> PARALLELL_SHIFT) & PARALLELL_MASK); + return (Uint8)((requestInfo >> PARALLEL_SHIFT) & PARALLEL_MASK); } inline @@ -211,58 +214,65 @@ ScanTabReq::clearRequestInfo(UintR & requestInfo){ inline void ScanTabReq::setParallelism(UintR & requestInfo, Uint32 type){ - ASSERT_MAX(type, PARALLELL_MASK, "ScanTabReq::setParallellism"); - requestInfo |= (type << PARALLELL_SHIFT); + ASSERT_MAX(type, PARALLEL_MASK, "ScanTabReq::setParallelism"); + requestInfo= (requestInfo & ~(PARALLEL_MASK << PARALLEL_SHIFT)) | + ((type & PARALLEL_MASK) << PARALLEL_SHIFT); } inline void ScanTabReq::setLockMode(UintR & requestInfo, Uint32 mode){ ASSERT_MAX(mode, LOCK_MODE_MASK, "ScanTabReq::setLockMode"); - requestInfo |= (mode << LOCK_MODE_SHIFT); + requestInfo= (requestInfo & ~(LOCK_MODE_MASK << LOCK_MODE_SHIFT)) | + ((mode & LOCK_MODE_MASK) << LOCK_MODE_SHIFT); } inline void ScanTabReq::setHoldLockFlag(UintR & requestInfo, Uint32 flag){ ASSERT_BOOL(flag, "ScanTabReq::setHoldLockFlag"); - requestInfo |= (flag << HOLD_LOCK_SHIFT); + requestInfo= (requestInfo & ~(HOLD_LOCK_MASK << HOLD_LOCK_SHIFT)) | + ((flag & HOLD_LOCK_MASK) << HOLD_LOCK_SHIFT); } inline void ScanTabReq::setReadCommittedFlag(UintR & requestInfo, Uint32 flag){ ASSERT_BOOL(flag, "ScanTabReq::setReadCommittedFlag"); - requestInfo |= (flag << READ_COMMITTED_SHIFT); + requestInfo= (requestInfo & ~(READ_COMMITTED_MASK << READ_COMMITTED_SHIFT)) | + ((flag & READ_COMMITTED_MASK) << READ_COMMITTED_SHIFT); } inline void ScanTabReq::setRangeScanFlag(UintR & requestInfo, Uint32 flag){ ASSERT_BOOL(flag, "ScanTabReq::setRangeScanFlag"); - requestInfo |= (flag << RANGE_SCAN_SHIFT); + requestInfo= (requestInfo & ~(RANGE_SCAN_MASK << RANGE_SCAN_SHIFT)) | + ((flag & RANGE_SCAN_MASK) << RANGE_SCAN_SHIFT); } inline void ScanTabReq::setDescendingFlag(UintR & requestInfo, Uint32 flag){ ASSERT_BOOL(flag, "ScanTabReq::setDescendingFlag"); - requestInfo |= (flag << DESCENDING_SHIFT); + requestInfo= (requestInfo & ~(DESCENDING_MASK << DESCENDING_SHIFT)) | + ((flag & DESCENDING_MASK) << DESCENDING_SHIFT); } inline void ScanTabReq::setTupScanFlag(UintR & requestInfo, Uint32 flag){ ASSERT_BOOL(flag, "ScanTabReq::setTupScanFlag"); - requestInfo |= (flag << TUP_SCAN_SHIFT); + requestInfo= (requestInfo & ~(TUP_SCAN_MASK << TUP_SCAN_SHIFT)) | + ((flag & TUP_SCAN_MASK) << TUP_SCAN_SHIFT); } inline void ScanTabReq::setScanBatch(Uint32 & requestInfo, Uint32 flag){ ASSERT_MAX(flag, SCAN_BATCH_MASK, "ScanTabReq::setScanBatch"); - requestInfo &= ~(SCAN_BATCH_MASK << SCAN_BATCH_SHIFT); - requestInfo |= (flag << SCAN_BATCH_SHIFT); + requestInfo= (requestInfo & ~(SCAN_BATCH_MASK << SCAN_BATCH_SHIFT)) | + ((flag & SCAN_BATCH_MASK) << SCAN_BATCH_SHIFT); } inline @@ -275,33 +285,36 @@ inline void ScanTabReq::setKeyinfoFlag(UintR & requestInfo, Uint32 flag){ ASSERT_BOOL(flag, "ScanTabReq::setKeyinfoFlag"); - requestInfo |= (flag << KEYINFO_SHIFT); 
+ requestInfo= (requestInfo & ~(KEYINFO_MASK << KEYINFO_SHIFT)) | + ((flag & KEYINFO_MASK) << KEYINFO_SHIFT); } inline Uint8 ScanTabReq::getDistributionKeyFlag(const UintR & requestInfo){ - return (Uint8)((requestInfo >> SCAN_DISTR_KEY_SHIFT) & 1); + return (Uint8)((requestInfo >> SCAN_DISTR_KEY_SHIFT) & SCAN_DISTR_KEY_MASK); } inline void ScanTabReq::setDistributionKeyFlag(UintR & requestInfo, Uint32 flag){ ASSERT_BOOL(flag, "ScanTabReq::setKeyinfoFlag"); - requestInfo |= (flag << SCAN_DISTR_KEY_SHIFT); + requestInfo= (requestInfo & ~(SCAN_DISTR_KEY_MASK << SCAN_DISTR_KEY_SHIFT)) | + ((flag & SCAN_DISTR_KEY_MASK) << SCAN_DISTR_KEY_SHIFT); } inline UintR ScanTabReq::getNoDiskFlag(const UintR & requestInfo){ - return (requestInfo >> SCAN_NODISK_SHIFT) & 1; + return (requestInfo >> SCAN_NODISK_SHIFT) & SCAN_NODISK_MASK; } inline void ScanTabReq::setNoDiskFlag(UintR & requestInfo, Uint32 flag){ ASSERT_BOOL(flag, "TcKeyReq::setNoDiskFlag"); - requestInfo |= (flag << SCAN_NODISK_SHIFT); + requestInfo= (requestInfo & ~(SCAN_NODISK_MASK << SCAN_NODISK_SHIFT)) | + ((flag & SCAN_NODISK_MASK) << SCAN_NODISK_SHIFT); } /** diff --git a/storage/ndb/include/kernel/signaldata/SumaImpl.hpp b/storage/ndb/include/kernel/signaldata/SumaImpl.hpp index 072c3955ac4..94775a5f3f4 100644 --- a/storage/ndb/include/kernel/signaldata/SumaImpl.hpp +++ b/storage/ndb/include/kernel/signaldata/SumaImpl.hpp @@ -304,7 +304,10 @@ struct SubTableData { Uint32 tableId; Uint32 requestInfo; Uint32 logType; - Uint32 changeMask; + union { + Uint32 changeMask; + Uint32 anyValue; + }; Uint32 totalLen; static void setOperation(Uint32& ri, Uint32 val) { diff --git a/storage/ndb/include/kernel/signaldata/TcKeyConf.hpp b/storage/ndb/include/kernel/signaldata/TcKeyConf.hpp index b8562875ef5..fd8932c3c87 100644 --- a/storage/ndb/include/kernel/signaldata/TcKeyConf.hpp +++ b/storage/ndb/include/kernel/signaldata/TcKeyConf.hpp @@ -46,7 +46,7 @@ public: */ STATIC_CONST( StaticLength = 5 ); STATIC_CONST( OperationLength = 2 ); - STATIC_CONST( SimpleReadBit = (((Uint32)1) << 31) ); + STATIC_CONST( DirtyReadBit = (((Uint32)1) << 31) ); private: diff --git a/storage/ndb/include/mgmapi/mgmapi.h b/storage/ndb/include/mgmapi/mgmapi.h index 42a6b53098f..0853f5a4422 100644 --- a/storage/ndb/include/mgmapi/mgmapi.h +++ b/storage/ndb/include/mgmapi/mgmapi.h @@ -18,11 +18,13 @@ #include "mgmapi_config_parameters.h" #include "ndb_logevent.h" +#include "mgmapi_error.h" #define MGM_LOGLEVELS CFG_MAX_LOGLEVEL - CFG_MIN_LOGLEVEL + 1 +#define NDB_MGM_MAX_LOGLEVEL 15 /** - * @mainpage MySQL Cluster Management API + * @section MySQL Cluster Management API * * The MySQL Cluster Management API (MGM API) is a C language API * that is used for: @@ -211,105 +213,6 @@ extern "C" { }; /** - * Error codes - */ - enum ndb_mgm_error { - /** Not an error */ - NDB_MGM_NO_ERROR = 0, - - /* Request for service errors */ - /** Supplied connectstring is illegal */ - NDB_MGM_ILLEGAL_CONNECT_STRING = 1001, - /** Supplied NdbMgmHandle is illegal */ - NDB_MGM_ILLEGAL_SERVER_HANDLE = 1005, - /** Illegal reply from server */ - NDB_MGM_ILLEGAL_SERVER_REPLY = 1006, - /** Illegal number of nodes */ - NDB_MGM_ILLEGAL_NUMBER_OF_NODES = 1007, - /** Illegal node status */ - NDB_MGM_ILLEGAL_NODE_STATUS = 1008, - /** Memory allocation error */ - NDB_MGM_OUT_OF_MEMORY = 1009, - /** Management server not connected */ - NDB_MGM_SERVER_NOT_CONNECTED = 1010, - /** Could not connect to socker */ - NDB_MGM_COULD_NOT_CONNECT_TO_SOCKET = 1011, - /** Could not bind local address */ - 
NDB_MGM_BIND_ADDRESS = 1012, - - /* Alloc node id failures */ - /** Generic error, retry may succeed */ - NDB_MGM_ALLOCID_ERROR = 1101, - /** Non retriable error */ - NDB_MGM_ALLOCID_CONFIG_MISMATCH = 1102, - - /* Service errors - Start/Stop Node or System */ - /** Start failed */ - NDB_MGM_START_FAILED = 2001, - /** Stop failed */ - NDB_MGM_STOP_FAILED = 2002, - /** Restart failed */ - NDB_MGM_RESTART_FAILED = 2003, - - /* Service errors - Backup */ - /** Unable to start backup */ - NDB_MGM_COULD_NOT_START_BACKUP = 3001, - /** Unable to abort backup */ - NDB_MGM_COULD_NOT_ABORT_BACKUP = 3002, - - /* Service errors - Single User Mode */ - /** Unable to enter single user mode */ - NDB_MGM_COULD_NOT_ENTER_SINGLE_USER_MODE = 4001, - /** Unable to exit single user mode */ - NDB_MGM_COULD_NOT_EXIT_SINGLE_USER_MODE = 4002, - - /* Usage errors */ - /** Usage error */ - NDB_MGM_USAGE_ERROR = 5001 - }; - -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - struct Ndb_Mgm_Error_Msg { - enum ndb_mgm_error code; - const char * msg; - }; - const struct Ndb_Mgm_Error_Msg ndb_mgm_error_msgs[] = { - { NDB_MGM_NO_ERROR, "No error" }, - - /* Request for service errors */ - { NDB_MGM_ILLEGAL_CONNECT_STRING, "Illegal connect string" }, - { NDB_MGM_ILLEGAL_SERVER_HANDLE, "Illegal server handle" }, - { NDB_MGM_ILLEGAL_SERVER_REPLY, "Illegal reply from server" }, - { NDB_MGM_ILLEGAL_NUMBER_OF_NODES, "Illegal number of nodes" }, - { NDB_MGM_ILLEGAL_NODE_STATUS, "Illegal node status" }, - { NDB_MGM_OUT_OF_MEMORY, "Out of memory" }, - { NDB_MGM_SERVER_NOT_CONNECTED, "Management server not connected" }, - { NDB_MGM_COULD_NOT_CONNECT_TO_SOCKET, "Could not connect to socket" }, - - /* Service errors - Start/Stop Node or System */ - { NDB_MGM_START_FAILED, "Start failed" }, - { NDB_MGM_STOP_FAILED, "Stop failed" }, - { NDB_MGM_RESTART_FAILED, "Restart failed" }, - - /* Service errors - Backup */ - { NDB_MGM_COULD_NOT_START_BACKUP, "Could not start backup" }, - { NDB_MGM_COULD_NOT_ABORT_BACKUP, "Could not abort backup" }, - - /* Service errors - Single User Mode */ - { NDB_MGM_COULD_NOT_ENTER_SINGLE_USER_MODE, - "Could not enter single user mode" }, - { NDB_MGM_COULD_NOT_EXIT_SINGLE_USER_MODE, - "Could not exit single user mode" }, - - /* Usage errors */ - { NDB_MGM_USAGE_ERROR, - "Usage error" } - }; - const int ndb_mgm_noOfErrorMsgs = - sizeof(ndb_mgm_error_msgs)/sizeof(struct Ndb_Mgm_Error_Msg); -#endif - - /** * Status of a node in the cluster. * * Sub-structure in enum ndb_mgm_cluster_state @@ -514,6 +417,18 @@ extern "C" { int ndb_mgm_set_connectstring(NdbMgmHandle handle, const char *connect_string); + /** + * Returns the number of management servers in the connect string + * (as set by ndb_mgm_set_connectstring()). This can be used + * to help work out the maximum amount of time that + * ndb_mgm_connect can take. + * + * @param handle Management handle + * + * @return < 0 on error + */ + int ndb_mgm_number_of_mgmd_in_connect_string(NdbMgmHandle handle); + int ndb_mgm_set_configuration_nodeid(NdbMgmHandle handle, int nodeid); int ndb_mgm_get_configuration_nodeid(NdbMgmHandle handle); int ndb_mgm_get_connected_port(NdbMgmHandle handle); @@ -545,8 +460,7 @@ extern "C" { const char *ndb_mgm_get_connectstring(NdbMgmHandle handle, char *buf, int buf_sz); /** - * Sets the number of seconds to wait for connect(2) during ndb_mgm_connect - * Default is no timeout + * DEPRECATED: use ndb_mgm_set_timeout instead. * * @param handle NdbMgmHandle * @param seconds number of seconds * @return zero on success */ int ndb_mgm_set_connect_timeout(NdbMgmHandle handle, unsigned int seconds);
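A rough sketch of how a client might combine ndb_mgm_number_of_mgmd_in_connect_string() with the per-operation network timeout (ndb_mgm_set_timeout(), declared just below) to bound how long connecting can block. The host names and the 10-second timeout are invented for illustration, and error handling is mostly elided:

```cpp
#include <mgmapi.h>

/* Sketch only: one connect pass over an n-server connect string with a
   10 s network timeout may block for roughly n * 10 s. */
static NdbMgmHandle connect_with_bounded_wait(void)
{
  NdbMgmHandle h= ndb_mgm_create_handle();
  ndb_mgm_set_connectstring(h, "mgmd-a:1186,mgmd-b:1186");
  ndb_mgm_set_timeout(h, 10000);              /* 10 s per network operation */
  int n= ndb_mgm_number_of_mgmd_in_connect_string(h);
  (void)n;                                    /* worst case ~ n * 10000 ms */
  if (ndb_mgm_connect(h, 0, 0, 1) < 0)        /* no retries, verbose */
    return NULL;                              /* see ndb_mgm_get_latest_error() */
  return h;
}
```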
/** + * Sets the number of milliseconds for timeout of network operations + * Default is 60 seconds. + * Only increments of 1000 ms are supported. No function is guaranteed + * to return in a fraction of a second. + * + * @param handle NdbMgmHandle + * @param timeout_ms number of milliseconds + * @return zero on success + */ + int ndb_mgm_set_timeout(NdbMgmHandle handle, unsigned int timeout_ms); + + /** * Connects to a management server. Connectstring is set by * ndb_mgm_set_connectstring(). * + * The timeout value is for the connect to each management server. + * Use ndb_mgm_number_of_mgmd_in_connect_string to work out + * the approximate maximum amount of time that could be spent in this + * function. + * * @param handle Management handle. * @param no_retries Number of retries to connect * (0 means connect once). diff --git a/storage/ndb/include/mgmapi/mgmapi_error.h b/storage/ndb/include/mgmapi/mgmapi_error.h new file mode 100644 index 00000000000..2d0aa1ded0f --- /dev/null +++ b/storage/ndb/include/mgmapi/mgmapi_error.h @@ -0,0 +1,121 @@ +/* Copyright (C) 2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#ifndef MGMAPI_ERROR_H +#define MGMAPI_ERROR_H + +#ifdef __cplusplus +extern "C" { +#endif + /** + * Error codes + */ + enum ndb_mgm_error { + /** Not an error */ + NDB_MGM_NO_ERROR = 0, + + /* Request for service errors */ + /** Supplied connectstring is illegal */ + NDB_MGM_ILLEGAL_CONNECT_STRING = 1001, + /** Supplied NdbMgmHandle is illegal */ + NDB_MGM_ILLEGAL_SERVER_HANDLE = 1005, + /** Illegal reply from server */ + NDB_MGM_ILLEGAL_SERVER_REPLY = 1006, + /** Illegal number of nodes */ + NDB_MGM_ILLEGAL_NUMBER_OF_NODES = 1007, + /** Illegal node status */ + NDB_MGM_ILLEGAL_NODE_STATUS = 1008, + /** Memory allocation error */ + NDB_MGM_OUT_OF_MEMORY = 1009, + /** Management server not connected */ + NDB_MGM_SERVER_NOT_CONNECTED = 1010, + /** Could not connect to socket */ + NDB_MGM_COULD_NOT_CONNECT_TO_SOCKET = 1011, + /** Could not bind local address */ + NDB_MGM_BIND_ADDRESS = 1012, + + /* Alloc node id failures */ + /** Generic error, retry may succeed */ + NDB_MGM_ALLOCID_ERROR = 1101, + /** Non-retriable error */ + NDB_MGM_ALLOCID_CONFIG_MISMATCH = 1102, + + /* Service errors - Start/Stop Node or System */ + /** Start failed */ + NDB_MGM_START_FAILED = 2001, + /** Stop failed */ + NDB_MGM_STOP_FAILED = 2002, + /** Restart failed */ + NDB_MGM_RESTART_FAILED = 2003, + + /* Service errors - Backup */ + /** Unable to start backup */ + NDB_MGM_COULD_NOT_START_BACKUP = 3001, + /** Unable to abort backup */ + NDB_MGM_COULD_NOT_ABORT_BACKUP = 3002, + + /* Service errors - Single User Mode */ + /** Unable to enter single user mode */ + NDB_MGM_COULD_NOT_ENTER_SINGLE_USER_MODE = 4001, + /** Unable to exit
single user mode */ + NDB_MGM_COULD_NOT_EXIT_SINGLE_USER_MODE = 4002, + + /* Usage errors */ + /** Usage error */ + NDB_MGM_USAGE_ERROR = 5001 + }; + struct Ndb_Mgm_Error_Msg { + enum ndb_mgm_error code; + const char * msg; + }; + const struct Ndb_Mgm_Error_Msg ndb_mgm_error_msgs[] = { + { NDB_MGM_NO_ERROR, "No error" }, + + /* Request for service errors */ + { NDB_MGM_ILLEGAL_CONNECT_STRING, "Illegal connect string" }, + { NDB_MGM_ILLEGAL_SERVER_HANDLE, "Illegal server handle" }, + { NDB_MGM_ILLEGAL_SERVER_REPLY, "Illegal reply from server" }, + { NDB_MGM_ILLEGAL_NUMBER_OF_NODES, "Illegal number of nodes" }, + { NDB_MGM_ILLEGAL_NODE_STATUS, "Illegal node status" }, + { NDB_MGM_OUT_OF_MEMORY, "Out of memory" }, + { NDB_MGM_SERVER_NOT_CONNECTED, "Management server not connected" }, + { NDB_MGM_COULD_NOT_CONNECT_TO_SOCKET, "Could not connect to socket" }, + + /* Service errors - Start/Stop Node or System */ + { NDB_MGM_START_FAILED, "Start failed" }, + { NDB_MGM_STOP_FAILED, "Stop failed" }, + { NDB_MGM_RESTART_FAILED, "Restart failed" }, + + /* Service errors - Backup */ + { NDB_MGM_COULD_NOT_START_BACKUP, "Could not start backup" }, + { NDB_MGM_COULD_NOT_ABORT_BACKUP, "Could not abort backup" }, + + /* Service errors - Single User Mode */ + { NDB_MGM_COULD_NOT_ENTER_SINGLE_USER_MODE, + "Could not enter single user mode" }, + { NDB_MGM_COULD_NOT_EXIT_SINGLE_USER_MODE, + "Could not exit single user mode" }, + + /* Usage errors */ + { NDB_MGM_USAGE_ERROR, + "Usage error" } + }; + const int ndb_mgm_noOfErrorMsgs = + sizeof(ndb_mgm_error_msgs)/sizeof(struct Ndb_Mgm_Error_Msg); +#ifdef __cplusplus +} +#endif + +#endif diff --git a/storage/ndb/include/mgmapi/ndbd_exit_codes.h b/storage/ndb/include/mgmapi/ndbd_exit_codes.h index 982aaf5f925..30578bdf722 100644 --- a/storage/ndb/include/mgmapi/ndbd_exit_codes.h +++ b/storage/ndb/include/mgmapi/ndbd_exit_codes.h @@ -78,8 +78,9 @@ typedef ndbd_exit_classification_enum ndbd_exit_classification; #define NDBD_EXIT_SR_RESTARTCONFLICT 2311 #define NDBD_EXIT_NO_MORE_UNDOLOG 2312 #define NDBD_EXIT_SR_UNDOLOG 2313 -#define NDBD_EXIT_SR_SCHEMAFILE 2310 #define NDBD_EXIT_SINGLE_USER_MODE 2314 +#define NDBD_EXIT_NODE_DECLARED_DEAD 2315 +#define NDBD_EXIT_SR_SCHEMAFILE 2316 #define NDBD_EXIT_MEMALLOC 2327 #define NDBD_EXIT_BLOCK_JBUFCONGESTION 2334 #define NDBD_EXIT_TIME_QUEUE_SHORT 2335 @@ -146,6 +147,7 @@ typedef ndbd_exit_classification_enum ndbd_exit_classification; #define NDBD_EXIT_AFS_READ_UNDERFLOW 2816 #define NDBD_EXIT_INVALID_LCP_FILE 2352 +#define NDBD_EXIT_INSUFFICENT_NODES 2353 const char * ndbd_exit_message(int faultId, ndbd_exit_classification *cl); diff --git a/storage/ndb/include/mgmcommon/ConfigRetriever.hpp b/storage/ndb/include/mgmcommon/ConfigRetriever.hpp index 221e24d0572..27a189c1563 100644 --- a/storage/ndb/include/mgmcommon/ConfigRetriever.hpp +++ b/storage/ndb/include/mgmcommon/ConfigRetriever.hpp @@ -28,7 +28,8 @@ class ConfigRetriever { public: ConfigRetriever(const char * _connect_string, Uint32 version, Uint32 nodeType, - const char * _bind_address = 0); + const char * _bind_address = 0, + int timeout_ms = 30000); ~ConfigRetriever(); int do_connect(int no_retries, int retry_delay_in_seconds, int verbose); diff --git a/storage/ndb/include/ndb_constants.h b/storage/ndb/include/ndb_constants.h index 8b3cb5b6cf9..e2f8e17e913 100644 --- a/storage/ndb/include/ndb_constants.h +++ b/storage/ndb/include/ndb_constants.h @@ -89,5 +89,12 @@ */ #define NDB_TEMP_TAB_PERMANENT 0 #define NDB_TEMP_TAB_TEMPORARY 1 + +/* + * Table single user mode + 
*/ +#define NDB_SUM_LOCKED 0 +#define NDB_SUM_READONLY 1 +#define NDB_SUM_READ_WRITE 2 #endif diff --git a/storage/ndb/include/ndb_global.h.in b/storage/ndb/include/ndb_global.h.in index c3ea909ba2e..2fc594b3f5a 100644 --- a/storage/ndb/include/ndb_global.h.in +++ b/storage/ndb/include/ndb_global.h.in @@ -115,8 +115,6 @@ static const char table_name_separator = '/'; #endif #ifdef __cplusplus -inline void* operator new(size_t, void* __p) { return __p; } -inline void* operator new[](size_t, void* __p) { return __p; } extern "C" { #endif diff --git a/storage/ndb/include/ndb_version.h.in b/storage/ndb/include/ndb_version.h.in index 9e1edeecd1e..5405ad4d7aa 100644 --- a/storage/ndb/include/ndb_version.h.in +++ b/storage/ndb/include/ndb_version.h.in @@ -16,8 +16,7 @@ #ifndef NDB_VERSION_H #define NDB_VERSION_H -#include <ndb_global.h> -#include <version.h> +#include <ndb_types.h> /* NDB build version */ #define NDB_VERSION_BUILD @NDB_VERSION_BUILD@ @@ -32,19 +31,35 @@ #define NDB_VERSION_STATUS "@NDB_VERSION_STATUS@" -#define MAKE_VERSION(A,B,C) (((A) << 16) | ((B) << 8) | ((C) << 0)) +#define NDB_MAKE_VERSION(A,B,C) (((A) << 16) | ((B) << 8) | ((C) << 0)) -#define NDB_VERSION_D MAKE_VERSION(NDB_VERSION_MAJOR, NDB_VERSION_MINOR, NDB_VERSION_BUILD) +#define NDB_VERSION_D NDB_MAKE_VERSION(NDB_VERSION_MAJOR, NDB_VERSION_MINOR, NDB_VERSION_BUILD) #define NDB_VERSION_STRING_BUF_SZ 100 #ifdef __cplusplus -extern "C" -#else -extern +extern "C" { #endif -char ndb_version_string_buf[NDB_VERSION_STRING_BUF_SZ]; -#define NDB_VERSION_STRING (getVersionString(NDB_VERSION, NDB_VERSION_STATUS, \ - ndb_version_string_buf, \ - sizeof(ndb_version_string_buf))) + +void ndbPrintVersion(); + +Uint32 ndbMakeVersion(Uint32 major, Uint32 minor, Uint32 build); + +Uint32 ndbGetMajor(Uint32 version); + +Uint32 ndbGetMinor(Uint32 version); + +Uint32 ndbGetBuild(Uint32 version); + +const char* ndbGetVersionString(Uint32 version, const char * status, + char *buf, unsigned sz); +const char* ndbGetOwnVersionString(); + +Uint32 ndbGetOwnVersion(); + +#ifdef __cplusplus +} +#endif + +#define NDB_VERSION_STRING ndbGetOwnVersionString() #define NDB_VERSION ndbGetOwnVersion() @@ -59,19 +74,19 @@ char ndb_version_string_buf[NDB_VERSION_STRING_BUF_SZ]; /** * From which version do we support rowid */ -#define NDBD_ROWID_VERSION (MAKE_VERSION(5,1,6)) -#define NDBD_INCL_NODECONF_VERSION_4 MAKE_VERSION(4,1,17) -#define NDBD_INCL_NODECONF_VERSION_5 MAKE_VERSION(5,0,18) -#define NDBD_FRAGID_VERSION (MAKE_VERSION(5,1,6)) -#define NDBD_DICT_LOCK_VERSION_5 MAKE_VERSION(5,0,23) -#define NDBD_DICT_LOCK_VERSION_5_1 MAKE_VERSION(5,1,12) +#define NDBD_ROWID_VERSION (NDB_MAKE_VERSION(5,1,6)) +#define NDBD_INCL_NODECONF_VERSION_4 NDB_MAKE_VERSION(4,1,17) +#define NDBD_INCL_NODECONF_VERSION_5 NDB_MAKE_VERSION(5,0,18) +#define NDBD_FRAGID_VERSION (NDB_MAKE_VERSION(5,1,6)) +#define NDBD_DICT_LOCK_VERSION_5 NDB_MAKE_VERSION(5,0,23) +#define NDBD_DICT_LOCK_VERSION_5_1 NDB_MAKE_VERSION(5,1,12) -#define NDBD_UPDATE_FRAG_DIST_KEY_50 MAKE_VERSION(5,0,26) -#define NDBD_UPDATE_FRAG_DIST_KEY_51 MAKE_VERSION(5,1,12) +#define NDBD_UPDATE_FRAG_DIST_KEY_50 NDB_MAKE_VERSION(5,0,26) +#define NDBD_UPDATE_FRAG_DIST_KEY_51 NDB_MAKE_VERSION(5,1,12) -#define NDBD_QMGR_SINGLEUSER_VERSION_5 MAKE_VERSION(5,0,25) +#define NDBD_QMGR_SINGLEUSER_VERSION_5 NDB_MAKE_VERSION(5,0,25) -#define NDBD_NODE_VERSION_REP MAKE_VERSION(6,1,1) +#define NDBD_NODE_VERSION_REP NDB_MAKE_VERSION(6,1,1) #endif diff --git a/storage/ndb/include/ndbapi/Ndb.hpp b/storage/ndb/include/ndbapi/Ndb.hpp 
index 80662f901b1..e758d174309 100644 --- a/storage/ndb/include/ndbapi/Ndb.hpp +++ b/storage/ndb/include/ndbapi/Ndb.hpp @@ -1055,6 +1055,8 @@ class Ndb friend class NdbDictInterface; friend class NdbBlob; friend class NdbImpl; + friend class Ndb_internal; + friend class NdbScanFilterImpl; #endif public: @@ -1104,7 +1106,7 @@ * * @param aCatalogName is the new name of the current catalog */ - void setCatalogName(const char * aCatalogName); + int setCatalogName(const char * aCatalogName); /** * The current schema name can be fetched by getSchemaName. @@ -1118,7 +1120,7 @@ * * @param aSchemaName is the new name of the current schema */ - void setSchemaName(const char * aSchemaName); + int setSchemaName(const char * aSchemaName); #endif /** @@ -1133,7 +1135,7 @@ * * @param aDatabaseName is the new name of the current database */ - void setDatabaseName(const char * aDatabaseName); + int setDatabaseName(const char * aDatabaseName); /** * The current database schema name can be fetched by getDatabaseSchemaName. @@ -1147,7 +1149,7 @@ * * @param aDatabaseSchemaName is the new name of the current database schema */ - void setDatabaseSchemaName(const char * aDatabaseSchemaName); + int setDatabaseSchemaName(const char * aDatabaseSchemaName); #ifndef DOXYGEN_SHOULD_SKIP_INTERNAL /** Set database and schema name to match previously retrieved table @@ -1280,6 +1282,16 @@ */ /** + * Structure for passing in pointers to startTransaction + * + */ + struct Key_part_ptr + { + const void * ptr; + unsigned len; + }; + + /** * Start a transaction * * @note When the transaction is completed it must be closed using @@ -1300,6 +1312,30 @@ Uint32 keyLen = 0); /** + * Compute hash value given table/keys + * + * @param hashvalueptr - OUT, is set to hashvalue if return value is 0 + * @param table Pointer to table object + * @param keyData Null-terminated array of pointers to keyParts that are + * part of the distribution key. + * Length of resp. keyPart will be read from + * metadata and checked against passed value + * @param xfrmbuf Pointer to temporary buffer that will be used + * to calculate hashvalue + * @param xfrmbuflen Length of buffer + * + * @note if xfrmbuf is null (default) malloc/free will be used + * if xfrmbuf is not null but length is too short, the method will fail + * + * @return 0 - ok - hashvalueptr is set + * else - fail, return error code + */ + static int computeHash(Uint32* hashvalueptr, + const NdbDictionary::Table*, + const struct Key_part_ptr * keyData, + void* xfrmbuf = 0, Uint32 xfrmbuflen = 0); + + /** * Close a transaction.
* * @note should be called after the transaction has completed, irrespective @@ -1488,12 +1524,15 @@ public: int initAutoIncrement(); int getAutoIncrementValue(const char* aTableName, - Uint64 & tupleId, Uint32 cacheSize); + Uint64 & tupleId, Uint32 cacheSize, + Uint64 step = 1, Uint64 start = 1); int getAutoIncrementValue(const NdbDictionary::Table * aTable, - Uint64 & tupleId, Uint32 cacheSize); + Uint64 & tupleId, Uint32 cacheSize, + Uint64 step = 1, Uint64 start = 1); int getAutoIncrementValue(const NdbDictionary::Table * aTable, TupleIdRange & range, Uint64 & tupleId, - Uint32 cacheSize); + Uint32 cacheSize, + Uint64 step = 1, Uint64 start = 1); int readAutoIncrementValue(const char* aTableName, Uint64 & tupleId); int readAutoIncrementValue(const NdbDictionary::Table * aTable, @@ -1510,7 +1549,7 @@ public: private: int getTupleIdFromNdb(const NdbTableImpl* table, TupleIdRange & range, Uint64 & tupleId, - Uint32 cacheSize); + Uint32 cacheSize, Uint64 step = 1, Uint64 start = 1); int readTupleIdFromNdb(const NdbTableImpl* table, TupleIdRange & range, Uint64 & tupleId); int setTupleIdInNdb(const NdbTableImpl* table, diff --git a/storage/ndb/include/ndbapi/NdbDictionary.hpp b/storage/ndb/include/ndbapi/NdbDictionary.hpp index 895bae09ab0..58882e139fd 100644 --- a/storage/ndb/include/ndbapi/NdbDictionary.hpp +++ b/storage/ndb/include/ndbapi/NdbDictionary.hpp @@ -429,7 +429,7 @@ public: * Set name of column * @param name Name of the column */ - void setName(const char * name); + int setName(const char * name); /** * Set whether column is nullable or not @@ -520,7 +520,7 @@ public: void setAutoIncrement(bool); bool getAutoIncrement() const; void setAutoIncrementInitialValue(Uint64 val); - void setDefaultValue(const char*); + int setDefaultValue(const char*); const char* getDefaultValue() const; static const Column * FRAGMENT; @@ -534,6 +534,7 @@ public: static const Column * RECORDS_IN_RANGE; static const Column * ROWID; static const Column * ROW_GCI; + static const Column * ANY_VALUE; static const Column * COPY_ROWID; int getSizeInBytes() const; @@ -577,6 +578,15 @@ public: */ class Table : public Object { public: + /* + * Single user mode specifies access rights to table during single user mode + */ + enum SingleUserMode { + SingleUserModeLocked = NDB_SUM_LOCKED, + SingleUserModeReadOnly = NDB_SUM_READONLY, + SingleUserModeReadWrite = NDB_SUM_READ_WRITE + }; + /** * @name General * @{ @@ -750,13 +760,13 @@ public: * Name of table * @param name Name of table */ - void setName(const char * name); + int setName(const char * name); /** * Add a column definition to a table * @note creates a copy */ - void addColumn(const Column &); + int addColumn(const Column &); /** * @see NdbDictionary::Table::getLogging. 
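To make the new Ndb::computeHash() from the Ndb.hpp hunks above concrete, here is a minimal sketch. The dictionary pointer, table name, and key value are invented; note that the Key_part_ptr array is null-terminated, as the documentation requires:

```cpp
#include <NdbApi.hpp>

/* Sketch: distribution hash for a table with a one-part distribution key. */
static int hash_of_pk(NdbDictionary::Dictionary *dict, Uint32 pk, Uint32 *hash)
{
  Ndb::Key_part_ptr parts[2];
  parts[0].ptr= &pk;                  /* first (and only) key part */
  parts[0].len= sizeof(pk);
  parts[1].ptr= 0;                    /* terminates the array */
  return Ndb::computeHash(hash, dict->getTable("t1"), parts);
  /* on 0, *hash can be used e.g. to start the transaction near its data */
}
```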
@@ -810,9 +820,9 @@ public: */ void setMaxLoadFactor(int); - void setTablespaceName(const char * name); + int setTablespaceName(const char * name); const char * getTablespaceName() const; - void setTablespace(const class Tablespace &); + int setTablespace(const class Tablespace &); bool getTablespace(Uint32 *id= 0, Uint32 *version= 0) const; /** @@ -845,7 +855,7 @@ public: /** * Set frm file to store with this table */ - void setFrm(const void* data, Uint32 len); + int setFrm(const void* data, Uint32 len); /** * Set array of fragment information containing @@ -853,12 +863,12 @@ public: * Node group identity * Fragment State */ - void setFragmentData(const void* data, Uint32 len); + int setFragmentData(const void* data, Uint32 len); /** * Set/Get tablespace names per fragment */ - void setTablespaceNames(const void* data, Uint32 len); + int setTablespaceNames(const void* data, Uint32 len); const void *getTablespaceNames(); Uint32 getTablespaceNamesLen() const; @@ -866,7 +876,7 @@ public: * Set tablespace information per fragment * Contains a tablespace id and a tablespace version */ - void setTablespaceData(const void* data, Uint32 len); + int setTablespaceData(const void* data, Uint32 len); /** * Set array of information mapping range values and list values @@ -875,7 +885,7 @@ public: * one pair per fragment. For list partitions it could be any number * of pairs, at least as many as there are fragments. */ - void setRangeListData(const void* data, Uint32 len); + int setRangeListData(const void* data, Uint32 len); /** * Set table object type @@ -896,6 +906,13 @@ public: void setMinRows(Uint64 minRows); Uint64 getMinRows() const; + /** + * Set/Get SingleUserMode + */ + void setSingleUserMode(enum SingleUserMode); + enum SingleUserMode getSingleUserMode() const; + + /** @} *******************************************************************/ /** @@ -1091,26 +1108,26 @@ public: /** * Set the name of an index */ - void setName(const char * name); + int setName(const char * name); /** * Define the name of the table to be indexed */ - void setTable(const char * name); + int setTable(const char * name); /** * Add a column to the index definition * Note that the order of columns will be in * the order they are added (only matters for ordered indexes). */ - void addColumn(const Column & c); + int addColumn(const Column & c); /** * Add a column name to the index definition * Note that the order of indexes will be in * the order they are added (only matters for ordered indexes). */ - void addColumnName(const char * name); + int addColumnName(const char * name); #ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED /** @@ -1119,7 +1136,7 @@ public: * the order they are added (only matters for ordered indexes). * Depricated, use addColumnName instead. */ - void addIndexColumn(const char * name); + int addIndexColumn(const char * name); #endif /** @@ -1127,7 +1144,7 @@ public: * Note that the order of indexes will be in * the order they are added (only matters for ordered indexes). */ - void addColumnNames(unsigned noOfNames, const char ** names); + int addColumnNames(unsigned noOfNames, const char ** names); #ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED /** @@ -1136,7 +1153,7 @@ public: * the order they are added (only matters for ordered indexes). * Depricated, use addColumnNames instead. 
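Many NdbDictionary setters in the hunks above change from void to int, so callers can detect failure (typically out of memory) instead of continuing with a half-built definition. A hypothetical sketch of the checking this enables; the table and column names are invented:

```cpp
#include <NdbApi.hpp>

/* Sketch of the void -> int setter migration in NdbDictionary. */
static int build_table_def(NdbDictionary::Table &tab)
{
  NdbDictionary::Column col;
  if (tab.setName("t1") != 0 || col.setName("a") != 0)
    return -1;                        /* out of memory: report, don't continue */
  col.setType(NdbDictionary::Column::Unsigned);
  col.setPrimaryKey(true);
  return tab.addColumn(col);          /* addColumn copies, so it can fail too */
}
```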
*/ - void addIndexColumns(int noOfNames, const char ** names); + int addIndexColumns(int noOfNames, const char ** names); #endif /** @@ -1273,7 +1290,7 @@ public: /** * Set unique identifier for the event */ - void setName(const char *name); + int setName(const char *name); /** * Get unique identifier for the event */ @@ -1300,7 +1317,7 @@ public: * @note preferred way is using setTable(const NdbDictionary::Table&) * or constructor with table object parameter */ - void setTable(const char *tableName); + int setTable(const char *tableName); /** * Get table name for events * @@ -1526,8 +1543,8 @@ public: Uint64 getSize() const; Uint64 getFree() const; - void setTablespace(const char * name); - void setTablespace(const class Tablespace &); + int setTablespace(const char * name); + int setTablespace(const class Tablespace &); const char * getTablespace() const; void getTablespaceId(ObjectId * dst) const; diff --git a/storage/ndb/include/ndbapi/NdbEventOperation.hpp b/storage/ndb/include/ndbapi/NdbEventOperation.hpp index d56b79dc2e6..437088d2893 100644 --- a/storage/ndb/include/ndbapi/NdbEventOperation.hpp +++ b/storage/ndb/include/ndbapi/NdbEventOperation.hpp @@ -203,6 +203,13 @@ public: Uint64 getGCI() const; /** + * Retrieve the AnyValue of the latest retrieved event + * + * @return AnyValue + */ + Uint32 getAnyValue() const; + + /** * Retrieve the complete GCI in the cluster (not necessarily * associated with an event) * diff --git a/storage/ndb/include/ndbapi/NdbIndexOperation.hpp b/storage/ndb/include/ndbapi/NdbIndexOperation.hpp index 0f06d8041ee..49e55f54f1a 100644 --- a/storage/ndb/include/ndbapi/NdbIndexOperation.hpp +++ b/storage/ndb/include/ndbapi/NdbIndexOperation.hpp @@ -181,8 +181,6 @@ private: const class NdbTableImpl* aTable, NdbTransaction*); - int prepareSend(Uint32 TC_ConnectPtr, Uint64 TransactionId); - // Private attributes const NdbIndexImpl* m_theIndex; friend struct Ndb_free_list_t<NdbIndexOperation>; diff --git a/storage/ndb/include/ndbapi/NdbIndexScanOperation.hpp b/storage/ndb/include/ndbapi/NdbIndexScanOperation.hpp index 07eea0bfbfb..38f353010a9 100644 --- a/storage/ndb/include/ndbapi/NdbIndexScanOperation.hpp +++ b/storage/ndb/include/ndbapi/NdbIndexScanOperation.hpp @@ -64,12 +64,14 @@ public: bool order_by, bool order_desc = false, bool read_range_no = false, - bool keyinfo = false) { + bool keyinfo = false, + bool multi_range = false) { Uint32 scan_flags = (SF_OrderBy & -(Int32)order_by) | (SF_Descending & -(Int32)order_desc) | (SF_ReadRangeNo & -(Int32)read_range_no) | - (SF_KeyInfo & -(Int32)keyinfo); + (SF_KeyInfo & -(Int32)keyinfo) | + (SF_MultiRange & -(Int32)multi_range); return readTuples(lock_mode, scan_flags, parallel, batch); } diff --git a/storage/ndb/include/ndbapi/NdbOperation.hpp b/storage/ndb/include/ndbapi/NdbOperation.hpp index 556412c4799..78dbadfd7ab 100644 --- a/storage/ndb/include/ndbapi/NdbOperation.hpp +++ b/storage/ndb/include/ndbapi/NdbOperation.hpp @@ -93,8 +93,35 @@ public: ,LM_CommittedRead ///< Ignore locks, read last committed value #ifndef DOXYGEN_SHOULD_SKIP_INTERNAL = 2, - LM_Dirty = 2 + LM_Dirty = 2, #endif + LM_SimpleRead = 3 ///< Read with shared lock, but release lock directly + }; + + /** + * How should transaction be handled if operation fails. + * + * If AO_IgnoreError, a failure in one operation will not abort the + * transaction, and NdbTransaction::execute() will return 0 (success). Use + * NdbOperation::getNdbError() to check for errors from individual + * operations. 
+ * + * If AbortOnError, a failure in one operation will abort the transaction + * and cause NdbTransaction::execute() to return -1. + * + * Abort option can be set on execute(), or in the individual operation. + * Setting AO_IgnoreError or AbortOnError in execute() overrides the settings + * on individual operations. Setting DefaultAbortOption in execute() (the + * default) causes individual operation settings to be used. + * + * For READ, default is AO_IgnoreError + * DML, default is AbortOnError + * CommittedRead does _only_ support AO_IgnoreError + */ + enum AbortOption { + DefaultAbortOption = -1,///< Use default as specified by op-type + AbortOnError = 0, ///< Abort transaction on failed operation + AO_IgnoreError = 2 ///< Transaction continues on failed operation }; /** @@ -387,6 +414,9 @@ public: int setValue(const char* anAttrName, Uint64 aValue); int setValue(const char* anAttrName, float aValue); int setValue(const char* anAttrName, double aValue); +#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL + int setAnyValue(Uint32 aValue); +#endif #ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED int setValue(Uint32 anAttrId, const char* aValue, Uint32 len); @@ -776,8 +806,13 @@ public: */ LockMode getLockMode() const { return theLockMode; } + /** + * Get/set abort option + */ + AbortOption getAbortOption() const; + int setAbortOption(AbortOption); + #ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - void setAbortOption(Int8 ao) { m_abortOption = ao; } /** * Set/get partition key @@ -808,8 +843,10 @@ protected: virtual ~NdbOperation(); void next(NdbOperation*); // Set next pointer NdbOperation* next(); // Get next pointer + public: #ifndef DOXYGEN_SHOULD_SKIP_INTERNAL + NdbTransaction* getNdbTransaction(); const NdbOperation* next() const; const NdbRecAttr* getFirstRecAttr() const; #endif @@ -856,7 +893,8 @@ protected: int doSend(int ProcessorId, Uint32 lastFlag); virtual int prepareSend(Uint32 TC_ConnectPtr, - Uint64 TransactionId); + Uint64 TransactionId, + AbortOption); virtual void setLastFlag(NdbApiSignal* signal, Uint32 lastFlag); int prepareSendInterpreted(); // Help routine to prepare* @@ -914,6 +952,8 @@ protected: // get table or index key from prepared signals int getKeyFromTCREQ(Uint32* data, Uint32 & size); + virtual void setReadLockMode(LockMode lockMode); + /****************************************************************************** * These are the private variables that are defined in the operation objects. *****************************************************************************/ @@ -1000,13 +1040,18 @@ protected: NdbBlob* theBlobList; /* - * Abort option per operation, used by blobs. Default -1. If set, - * overrides abort option on connection level. If set to IgnoreError, - * does not cause execute() to return failure. This is different from - * IgnoreError on connection level. + * Abort option per operation, used by blobs. + * See also comments on enum AbortOption. */ Int8 m_abortOption; + /* + * For blob impl, option to not propagate error to trans level. + * Could be AO_IgnoreError variant if we want it public. + * Ignored unless AO_IgnoreError is also set. 
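A sketch of the AbortOption behaviour documented above: with AO_IgnoreError, execute() reports success even when individual operations fail, so the caller inspects each completed operation afterwards. The transaction pointer is assumed to exist, and getNextCompletedOperation() is the usual NdbTransaction iteration helper:

```cpp
#include <NdbApi.hpp>

/* Sketch only: per-operation error checking under AO_IgnoreError. */
static void execute_ignoring_row_errors(NdbTransaction *trans)
{
  if (trans->execute(NdbTransaction::NoCommit,
                     NdbOperation::AO_IgnoreError) == 0)
  {
    for (const NdbOperation *op= trans->getNextCompletedOperation(NULL);
         op != NULL; op= trans->getNextCompletedOperation(op))
    {
      if (op->getNdbError().code != 0)
        ; /* this particular operation failed; decide per row what to do */
    }
  }
}
```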
+ */ + Int8 m_noErrorPropagation; + friend struct Ndb_free_list_t<NdbOperation>; }; diff --git a/storage/ndb/include/ndbapi/NdbPool.hpp b/storage/ndb/include/ndbapi/NdbPool.hpp index 1963bf26448..44b6d7488f0 100644 --- a/storage/ndb/include/ndbapi/NdbPool.hpp +++ b/storage/ndb/include/ndbapi/NdbPool.hpp @@ -17,7 +17,8 @@ class Ndb; class NdbPool; bool -create_instance(Uint32 max_ndb_objects, +create_instance(Ndb_cluster_connection* cc, + Uint32 max_ndb_objects, Uint32 no_conn_obj, Uint32 init_no_ndb_objects); diff --git a/storage/ndb/include/ndbapi/NdbRecAttr.hpp b/storage/ndb/include/ndbapi/NdbRecAttr.hpp index 40d5b598c0c..121339e470b 100644 --- a/storage/ndb/include/ndbapi/NdbRecAttr.hpp +++ b/storage/ndb/include/ndbapi/NdbRecAttr.hpp @@ -131,6 +131,13 @@ public: /** * Get value stored in NdbRecAttr object. + * + * @return Medium value. + */ + Int32 medium_value() const; + + /** + * Get value stored in NdbRecAttr object. * * @return Short value. */ @@ -146,6 +153,13 @@ public: /** * Get value stored in NdbRecAttr object. * + * @return Int8 value. + */ + Int8 int8_value() const; + + /** + * Get value stored in NdbRecAttr object. + * * @return 64 bit unsigned value. */ Uint64 u_64_value() const; @@ -160,6 +174,13 @@ public: /** * Get value stored in NdbRecAttr object. * + * @return Unsigned medium value. + */ + Uint32 u_medium_value() const; + + /** + * Get value stored in NdbRecAttr object. + * * @return Unsigned short value. */ Uint16 u_short_value() const; @@ -174,6 +195,13 @@ public: /** * Get value stored in NdbRecAttr object. * + * @return Uint8 value. + */ + Uint8 u_8_value() const; + + /** + * Get value stored in NdbRecAttr object. + * * @return Float value. */ float float_value() const; @@ -302,6 +330,13 @@ NdbRecAttr::char_value() const } inline +Int8 +NdbRecAttr::int8_value() const +{ + return *(Int8*)theRef; +} + +inline Uint32 NdbRecAttr::u_32_value() const { @@ -323,6 +358,13 @@ NdbRecAttr::u_char_value() const } inline +Uint8 +NdbRecAttr::u_8_value() const +{ + return *(Uint8*)theRef; +} + +inline void NdbRecAttr::release() { @@ -409,6 +451,25 @@ NdbRecAttr::setUNDEFINED() class NdbOut& operator <<(class NdbOut&, const NdbRecAttr &); +class NdbRecordPrintFormat +{ +public: + NdbRecordPrintFormat(); + virtual ~NdbRecordPrintFormat(); + const char *lines_terminated_by; + const char *fields_terminated_by; + const char *start_array_enclosure; + const char *end_array_enclosure; + const char *fields_enclosed_by; + const char *fields_optionally_enclosed_by; + const char *hex_prefix; + const char *null_string; + int hex_format; +}; +NdbOut& +ndbrecattr_print_formatted(NdbOut& out, const NdbRecAttr &r, + const NdbRecordPrintFormat &f); + #endif // ifndef DOXYGEN_SHOULD_SKIP_INTERNAL #endif diff --git a/storage/ndb/include/ndbapi/NdbReceiver.hpp b/storage/ndb/include/ndbapi/NdbReceiver.hpp index 73bf5c66863..b8abd281496 100644 --- a/storage/ndb/include/ndbapi/NdbReceiver.hpp +++ b/storage/ndb/include/ndbapi/NdbReceiver.hpp @@ -38,7 +38,7 @@ public: }; NdbReceiver(Ndb *aNdb); - void init(ReceiverType type, void* owner); + int init(ReceiverType type, void* owner); void release(); ~NdbReceiver(); @@ -57,7 +57,7 @@ public: bool checkMagicNumber() const; - inline void next(NdbReceiver* next) { m_next = next;} + inline void next(NdbReceiver* next_arg) { m_next = next_arg;} inline NdbReceiver* next() { return m_next; } void setErrorCode(int); @@ -75,7 +75,7 @@ private: * At setup */ class NdbRecAttr * getValue(const class NdbColumnImpl*, char * user_dst_ptr); - void 
do_get_value(NdbReceiver*, Uint32 rows, Uint32 key_size, Uint32 range); + int do_get_value(NdbReceiver*, Uint32 rows, Uint32 key_size, Uint32 range); void prepareSend(); void calculate_batch_size(Uint32, Uint32, Uint32&, Uint32&, Uint32&); diff --git a/storage/ndb/include/ndbapi/NdbScanFilter.hpp b/storage/ndb/include/ndbapi/NdbScanFilter.hpp index 1ef62558560..4527012a6c4 100644 --- a/storage/ndb/include/ndbapi/NdbScanFilter.hpp +++ b/storage/ndb/include/ndbapi/NdbScanFilter.hpp @@ -17,6 +17,7 @@ #define NDB_SCAN_FILTER_HPP #include <ndb_types.h> +#include <ndbapi_limits.h> /** * @class NdbScanFilter @@ -31,8 +32,13 @@ public: /** * Constructor * @param op The NdbOperation that the filter belongs to (is applied to). + * @param abort_on_too_large abort transaction on filter too large + * default: true + * @param max_size Maximum size of generated filter in words */ - NdbScanFilter(class NdbOperation * op); + NdbScanFilter(class NdbOperation * op, + bool abort_on_too_large = true, + Uint32 max_size = NDB_MAX_SCANFILTER_SIZE_IN_WORDS); ~NdbScanFilter(); /** @@ -166,6 +172,27 @@ public: /** @} *********************************************************************/ #endif + enum Error { + FilterTooLarge = 4294 + }; + + /** + * Get filter level error. + * + * Most errors are set only on operation level, and they abort the + * transaction. The error FilterTooLarge is set on filter level and + * by default it propagates to operation level and also aborts the + * transaction. + * + * If option abort_on_too_large is set to false, then FilterTooLarge + * does not propagate. One can then either ignore this error (in + * which case no filtering is done) or try to define a new filter + * immediately. + */ + const class NdbError & getNdbError() const; +#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL + NdbOperation * getNdbOperation(); +#endif private: #ifndef DOXYGEN_SHOULD_SKIP_INTERNAL friend class NdbScanFilterImpl; diff --git a/storage/ndb/include/ndbapi/NdbScanOperation.hpp b/storage/ndb/include/ndbapi/NdbScanOperation.hpp index c957e3780cc..bc24b782add 100644 --- a/storage/ndb/include/ndbapi/NdbScanOperation.hpp +++ b/storage/ndb/include/ndbapi/NdbScanOperation.hpp @@ -38,7 +38,8 @@ class NdbScanOperation : public NdbOperation { public: /** * Scan flags. OR-ed together and passed as second argument to - * readTuples. + * readTuples. Note that SF_MultiRange has to be set if several + * ranges (bounds) are to be passed. 
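The NdbScanFilter changes above add a FilterTooLarge error that, when abort_on_too_large is passed as false, stays on the filter instead of aborting the transaction. A hypothetical sketch; the column id, comparison value, and word limit are chosen arbitrarily:

```cpp
#include <NdbApi.hpp>

/* Sketch: keep a too-large generated filter from aborting the transaction. */
static void filter_with_cap(NdbOperation *scan_op)
{
  NdbScanFilter filter(scan_op, false /* abort_on_too_large */,
                       1000 /* max words, deliberately small */);
  if (filter.begin(NdbScanFilter::AND) < 0 ||
      filter.ge(0 /* column id */, (Uint32)10) < 0 ||
      filter.end() < 0)
  {
    if (filter.getNdbError().code == NdbScanFilter::FilterTooLarge)
      ; /* ignore (scan runs unfiltered) or build a smaller filter */
  }
}
```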
*/ enum ScanFlag { SF_TupScan = (1 << 16), // scan TUP order @@ -46,6 +47,7 @@ public: SF_OrderBy = (1 << 24), // index scan in order SF_Descending = (2 << 24), // index scan in descending order SF_ReadRangeNo = (4 << 24), // enable @ref get_range_no + SF_MultiRange = (8 << 24), // scan is part of multi-range scan SF_KeyInfo = 1 // request KeyInfo to be sent back }; @@ -72,7 +74,8 @@ public: */ #ifdef ndb_readtuples_impossible_overload int readTuples(LockMode lock_mode = LM_Read, - Uint32 batch = 0, Uint32 parallel = 0, bool keyinfo = false); + Uint32 batch = 0, Uint32 parallel = 0, + bool keyinfo = false, bool multi_range = false); #endif inline int readTuples(int parallell){ @@ -211,6 +214,7 @@ protected: int init(const NdbTableImpl* tab, NdbTransaction*); int prepareSend(Uint32 TC_ConnectPtr, Uint64 TransactionId); int doSend(int ProcessorId); + virtual void setReadLockMode(LockMode lockMode); virtual void setErrorCode(int aErrorCode); virtual void setErrorCodeAbort(int aErrorCode); @@ -264,6 +268,7 @@ protected: bool m_descending; Uint32 m_read_range_no; NdbRecAttr *m_curr_row; // Pointer to last returned row + bool m_multi_range; // Mark if operation is part of multi-range scan bool m_executed; // Marker if operation should be released at close }; diff --git a/storage/ndb/include/ndbapi/NdbTransaction.hpp b/storage/ndb/include/ndbapi/NdbTransaction.hpp index ffba6f8d34f..6a057655398 100644 --- a/storage/ndb/include/ndbapi/NdbTransaction.hpp +++ b/storage/ndb/include/ndbapi/NdbTransaction.hpp @@ -20,6 +20,7 @@ #include "NdbError.hpp" #include "NdbDictionary.hpp" #include "Ndb.hpp" +#include "NdbOperation.hpp" class NdbTransaction; class NdbOperation; @@ -44,11 +45,12 @@ typedef void (* NdbAsynchCallback)(int, NdbTransaction*, void*); #ifndef DOXYGEN_SHOULD_SKIP_INTERNAL enum AbortOption { - CommitIfFailFree= 0, - TryCommit= 0, - AbortOnError= 0, - CommitAsMuchAsPossible= 2, - AO_IgnoreError= 2 + DefaultAbortOption = NdbOperation::DefaultAbortOption, + CommitIfFailFree = NdbOperation::AbortOnError, + TryCommit = NdbOperation::AbortOnError, + AbortOnError= NdbOperation::AbortOnError, + CommitAsMuchAsPossible = NdbOperation::AO_IgnoreError, + AO_IgnoreError= NdbOperation::AO_IgnoreError }; enum ExecType { NoExecTypeDef = -1, @@ -145,20 +147,6 @@ class NdbTransaction public: /** - * Commit type of transaction - */ - enum AbortOption { - AbortOnError= ///< Abort transaction on failed operation -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - ::AbortOnError -#endif - ,AO_IgnoreError= ///< Transaction continues on failed operation -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - ::AO_IgnoreError -#endif - }; - - /** * Execution type of transaction */ enum ExecType { @@ -182,6 +170,15 @@ public: #endif }; +#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL + /** + * Convenience method to fetch this transaction's Ndb* object + */ + Ndb * getNdb() { + return theNdb; + } +#endif + #ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED /** * Get an NdbOperation for a table. @@ -316,13 +313,15 @@ public: * @return 0 if successful otherwise -1. 
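Here is a sketch of the SF_MultiRange usage the ScanFlag comment above calls for: two ranges on one ordered index scan. The index, table, and column names are invented, and error handling is elided; note that in the NDB bound naming, BoundLE/BoundGE denote the lower/upper bound respectively:

```cpp
#include <NdbApi.hpp>

/* Sketch only: a two-range ordered index scan with range numbers. */
static NdbIndexScanOperation *two_range_scan(NdbTransaction *trans)
{
  NdbIndexScanOperation *scan=
    trans->getNdbIndexScanOperation("idx_a", "t1");
  Uint32 flags= NdbIndexScanOperation::SF_OrderBy |
                NdbIndexScanOperation::SF_MultiRange |
                NdbIndexScanOperation::SF_ReadRangeNo;
  scan->readTuples(NdbOperation::LM_Read, flags);
  Uint32 lo1= 10, hi1= 20, lo2= 100;
  scan->setBound("a", NdbIndexScanOperation::BoundLE, &lo1); /* a >= 10 */
  scan->setBound("a", NdbIndexScanOperation::BoundGE, &hi1); /* a <= 20 */
  scan->end_of_bound(0);                                     /* range 0 done */
  scan->setBound("a", NdbIndexScanOperation::BoundLE, &lo2); /* a >= 100 */
  scan->end_of_bound(1);
  return scan;  /* get_range_no() tells which range produced each row */
}
```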
*/ int execute(ExecType execType, - AbortOption abortOption = AbortOnError, + NdbOperation::AbortOption = NdbOperation::DefaultAbortOption, int force = 0 ); #ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED int execute(::ExecType execType, - ::AbortOption abortOption = ::AbortOnError, - int force = 0 ) - { return execute ((ExecType)execType,(AbortOption)abortOption,force); } + ::AbortOption abortOption = ::DefaultAbortOption, + int force = 0 ) { + return execute ((ExecType)execType, + (NdbOperation::AbortOption)abortOption, + force); } #endif #ifndef DOXYGEN_SHOULD_SKIP_INTERNAL @@ -353,14 +352,14 @@ public: void executeAsynchPrepare(ExecType execType, NdbAsynchCallback callback, void* anyObject, - AbortOption abortOption = AbortOnError); + NdbOperation::AbortOption = NdbOperation::DefaultAbortOption); #ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED void executeAsynchPrepare(::ExecType execType, NdbAsynchCallback callback, void* anyObject, - ::AbortOption abortOption = ::AbortOnError) - { executeAsynchPrepare((ExecType)execType, callback, anyObject, - (AbortOption)abortOption); } + ::AbortOption ao = ::DefaultAbortOption) { + executeAsynchPrepare((ExecType)execType, callback, anyObject, + (NdbOperation::AbortOption)ao); } #endif /** @@ -379,16 +378,16 @@ public: void executeAsynch(ExecType aTypeOfExec, NdbAsynchCallback aCallback, void* anyObject, - AbortOption abortOption = AbortOnError, + NdbOperation::AbortOption = NdbOperation::DefaultAbortOption, int forceSend= 0); #ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED void executeAsynch(::ExecType aTypeOfExec, NdbAsynchCallback aCallback, void* anyObject, - ::AbortOption abortOption= ::AbortOnError, + ::AbortOption abortOption= ::DefaultAbortOption, int forceSend= 0) { executeAsynch((ExecType)aTypeOfExec, aCallback, anyObject, - (AbortOption)abortOption, forceSend); } + (NdbOperation::AbortOption)abortOption, forceSend); } #endif #endif /** @@ -587,10 +586,10 @@ private: NdbTransaction(Ndb* aNdb); ~NdbTransaction(); - void init(); // Initialize connection object for new transaction + int init(); // Initialize connection object for new transaction int executeNoBlobs(ExecType execType, - AbortOption abortOption = AbortOnError, + NdbOperation::AbortOption = NdbOperation::DefaultAbortOption, int force = 0 ); /** @@ -644,7 +643,7 @@ private: int sendCOMMIT(); // Send a TC_COMMITREQ signal; void setGCI(int GCI); // Set the global checkpoint identity - int OpCompleteFailure(Uint8 abortoption, bool setFailure = true); + int OpCompleteFailure(NdbOperation*); int OpCompleteSuccess(); void CompletedOperations(); // Move active ops to list of completed @@ -734,7 +733,6 @@ private: Uint32 theNoOfOpSent; // How many operations have been sent Uint32 theNoOfOpCompleted; // How many operations have completed - Uint32 theNoOfOpFetched; // How many operations was actually fetched Uint32 theMyRef; // Our block reference Uint32 theTCConPtr; // Transaction Co-ordinator connection pointer. 
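AbortOption now lives in NdbOperation, and execute() defaults to NdbOperation::DefaultAbortOption so that each operation's own abort setting decides. A minimal calling sketch against the new signature, assuming an already prepared NdbTransaction *trans:

    // Commit, but continue past individual failed operations.
    if (trans->execute(NdbTransaction::Commit,
                       NdbOperation::AO_IgnoreError) == -1)
    {
      // The transaction still records the first error encountered.
      const NdbError &err = trans->getNdbError();
      fprintf(stderr, "execute failed: %d %s\n", err.code, err.message);
    }

Passing NdbOperation::AbortOnError restores the old default behaviour explicitly, as the updated ndbapi_simple_index example further down does.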
Uint64 theTransactionId; // theTransactionId of the transaction @@ -758,7 +756,6 @@ private: bool theTransactionIsStarted; bool theInUseState; bool theSimpleState; - Uint8 m_abortOption; // Type of commit enum ListState { NotInList, diff --git a/storage/ndb/include/ndbapi/ndb_cluster_connection.hpp b/storage/ndb/include/ndbapi/ndb_cluster_connection.hpp index e3532b072c0..80bfe7461f8 100644 --- a/storage/ndb/include/ndbapi/ndb_cluster_connection.hpp +++ b/storage/ndb/include/ndbapi/ndb_cluster_connection.hpp @@ -62,6 +62,24 @@ public: void set_name(const char *name); /** + * Set timeout + * + * Used as a timeout when talking to the management server, + * helps limit the amount of time that we may block when connecting + * + * Basically just calls ndb_mgm_set_timeout(h,ms). + * + * The default is 30 seconds. + * + * @param timeout_ms millisecond timeout. As with ndb_mgm_set_timeout, + * only increments of 1000 are really supported, + * with no hard guarantees about calls completing + * in any given amount of time. + * @return 0 on success + */ + int set_timeout(int timeout_ms); + + /** * Connect to a cluster management server * * @param no_retries specifies the number of retries to attempt diff --git a/storage/ndb/include/ndbapi/ndbapi_limits.h b/storage/ndb/include/ndbapi/ndbapi_limits.h index 63399e4bd0a..e283913d059 100644 --- a/storage/ndb/include/ndbapi/ndbapi_limits.h +++ b/storage/ndb/include/ndbapi/ndbapi_limits.h @@ -26,4 +26,6 @@ #define NDB_MAX_TUPLE_SIZE (NDB_MAX_TUPLE_SIZE_IN_WORDS*4) #define NDB_MAX_ACTIVE_EVENTS 100 +#define NDB_MAX_SCANFILTER_SIZE_IN_WORDS 50000 + #endif diff --git a/storage/ndb/include/transporter/TransporterDefinitions.hpp b/storage/ndb/include/transporter/TransporterDefinitions.hpp index 8154d8ea8bd..003824d01e8 100644 --- a/storage/ndb/include/transporter/TransporterDefinitions.hpp +++ b/storage/ndb/include/transporter/TransporterDefinitions.hpp @@ -117,6 +117,10 @@ struct SegmentedSectionPtr { struct SectionSegment * p; SegmentedSectionPtr() {} + SegmentedSectionPtr(Uint32 sz_arg, Uint32 i_arg, + struct SectionSegment *p_arg) + :sz(sz_arg), i(i_arg), p(p_arg) + {} void setNull() { p = 0;} bool isNull() const { return p == 0;} }; diff --git a/storage/ndb/include/util/BaseString.hpp b/storage/ndb/include/util/BaseString.hpp index 50abecc6e90..0c41f254edc 100644 --- a/storage/ndb/include/util/BaseString.hpp +++ b/storage/ndb/include/util/BaseString.hpp @@ -188,6 +188,7 @@ public: private: char* m_chr; unsigned m_len; + friend bool operator!(const BaseString& str); }; inline const char* @@ -261,6 +262,12 @@ BaseString::operator!=(const char *str) const return strcmp(m_chr, str) != 0; } +inline bool +operator!(const BaseString& str) +{ + return str.m_chr == NULL; +} + inline BaseString& BaseString::assign(const BaseString& str) { diff --git a/storage/ndb/include/util/InputStream.hpp b/storage/ndb/include/util/InputStream.hpp index 031260dac3b..3e696eac732 100644 --- a/storage/ndb/include/util/InputStream.hpp +++ b/storage/ndb/include/util/InputStream.hpp @@ -32,6 +32,7 @@ public: * Set the mutex to be UNLOCKED when blocking (e.g.
select(2)) */ void set_mutex(NdbMutex *m) { m_mutex= m; }; + virtual void reset_timeout() {}; protected: NdbMutex *m_mutex; }; @@ -48,12 +49,17 @@ extern FileInputStream Stdin; class SocketInputStream : public InputStream { NDB_SOCKET_TYPE m_socket; - unsigned m_timeout; + unsigned m_timeout_ms; + unsigned m_timeout_remain; bool m_startover; + bool m_timedout; public: - SocketInputStream(NDB_SOCKET_TYPE socket, unsigned readTimeout = 1000); + SocketInputStream(NDB_SOCKET_TYPE socket, unsigned read_timeout_ms = 60000); virtual ~SocketInputStream() {} char* gets(char * buf, int bufLen); + bool timedout() { return m_timedout; }; + void reset_timeout() { m_timedout= false; m_timeout_remain= m_timeout_ms;}; + }; #endif diff --git a/storage/ndb/include/util/OutputStream.hpp b/storage/ndb/include/util/OutputStream.hpp index cbc00fb286a..05fc69a46c3 100644 --- a/storage/ndb/include/util/OutputStream.hpp +++ b/storage/ndb/include/util/OutputStream.hpp @@ -29,6 +29,7 @@ public: virtual int print(const char * fmt, ...) = 0; virtual int println(const char * fmt, ...) = 0; virtual void flush() {}; + virtual void reset_timeout() {}; }; class FileOutputStream : public OutputStream { @@ -36,6 +37,7 @@ class FileOutputStream : public OutputStream { public: FileOutputStream(FILE * file = stdout); virtual ~FileOutputStream() {} + FILE *getFile() { return f; } int print(const char * fmt, ...); int println(const char * fmt, ...); @@ -44,10 +46,14 @@ public: class SocketOutputStream : public OutputStream { NDB_SOCKET_TYPE m_socket; - unsigned m_timeout; + unsigned m_timeout_ms; + bool m_timedout; + unsigned m_timeout_remain; public: - SocketOutputStream(NDB_SOCKET_TYPE socket, unsigned writeTimeout = 1000); + SocketOutputStream(NDB_SOCKET_TYPE socket, unsigned write_timeout_ms = 1000); virtual ~SocketOutputStream() {} + bool timedout() { return m_timedout; }; + void reset_timeout() { m_timedout= false; m_timeout_remain= m_timeout_ms;}; int print(const char * fmt, ...); int println(const char * fmt, ...); diff --git a/storage/ndb/include/util/Vector.hpp b/storage/ndb/include/util/Vector.hpp index 1a3767402e3..7ae4228985d 100644 --- a/storage/ndb/include/util/Vector.hpp +++ b/storage/ndb/include/util/Vector.hpp @@ -29,7 +29,7 @@ public: const T& operator[](unsigned i) const; unsigned size() const { return m_size; }; - void push_back(const T &); + int push_back(const T &); void push(const T&, unsigned pos); T& set(T&, unsigned pos, T& fill_obj); T& back(); @@ -38,7 +38,7 @@ public: void clear(); - void fill(unsigned new_size, T & obj); + int fill(unsigned new_size, T & obj); Vector<T>& operator=(const Vector<T>&); @@ -54,6 +54,14 @@ private: template<class T> Vector<T>::Vector(int i){ m_items = new T[i]; + if (m_items == NULL) + { + errno = ENOMEM; + m_size = 0; + m_arraySize = 0; + m_incSize = 0; + return; + } m_size = 0; m_arraySize = i; m_incSize = 50; @@ -91,12 +99,15 @@ Vector<T>::back(){ } template<class T> -void +int Vector<T>::push_back(const T & t){ if(m_size == m_arraySize){ T * tmp = new T [m_arraySize + m_incSize]; - if(!tmp) - abort(); + if(tmp == NULL) + { + errno = ENOMEM; + return -1; + } for (unsigned k = 0; k < m_size; k++) tmp[k] = m_items[k]; delete[] m_items; @@ -105,6 +116,7 @@ Vector<T>::push_back(const T & t){ } m_items[m_size] = t; m_size++; + return 0; } template<class T> @@ -150,10 +162,12 @@ Vector<T>::clear(){ } template<class T> -void +int Vector<T>::fill(unsigned new_size, T & obj){ while(m_size <= new_size) - push_back(obj); + if (push_back(obj)) + return -1; + return 0; } 
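Vector<T>::push_back() and fill() now return int and set errno to ENOMEM on allocation failure instead of calling abort(); the MutexVector variants below get the same treatment. A hedged sketch of the calling pattern this implies:

    // Sketch only: propagate allocation failure instead of crashing.
    Vector<Uint32> pages(10);
    Uint32 page= 0;
    if (pages.push_back(page) != 0)
    {
      // errno is ENOMEM here; let the caller decide how to recover.
      return -1;
    }

The constructors can now fail too: they leave the vector empty with errno set, which is why the size members are zeroed before the early return.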
template<class T> @@ -177,8 +191,8 @@ struct MutexVector : public NdbLockable { const T& operator[](unsigned i) const; unsigned size() const { return m_size; }; - void push_back(const T &); - void push_back(const T &, bool lockMutex); + int push_back(const T &); + int push_back(const T &, bool lockMutex); T& back(); void erase(unsigned index); @@ -187,7 +201,7 @@ struct MutexVector : public NdbLockable { void clear(); void clear(bool lockMutex); - void fill(unsigned new_size, T & obj); + int fill(unsigned new_size, T & obj); private: T * m_items; unsigned m_size; @@ -198,6 +212,14 @@ private: template<class T> MutexVector<T>::MutexVector(int i){ m_items = new T[i]; + if (m_items == NULL) + { + errno = ENOMEM; + m_size = 0; + m_arraySize = 0; + m_incSize = 0; + return; + } m_size = 0; m_arraySize = i; m_incSize = 50; @@ -235,11 +257,17 @@ MutexVector<T>::back(){ } template<class T> -void +int MutexVector<T>::push_back(const T & t){ lock(); if(m_size == m_arraySize){ T * tmp = new T [m_arraySize + m_incSize]; + if (tmp == NULL) + { + errno = ENOMEM; + unlock(); + return -1; + } for (unsigned k = 0; k < m_size; k++) tmp[k] = m_items[k]; delete[] m_items; @@ -249,15 +277,23 @@ MutexVector<T>::push_back(const T & t){ m_items[m_size] = t; m_size++; unlock(); + return 0; } template<class T> -void +int MutexVector<T>::push_back(const T & t, bool lockMutex){ if(lockMutex) lock(); if(m_size == m_arraySize){ T * tmp = new T [m_arraySize + m_incSize]; + if (tmp == NULL) + { + errno = ENOMEM; + if(lockMutex) + unlock(); + return -1; + } for (unsigned k = 0; k < m_size; k++) tmp[k] = m_items[k]; delete[] m_items; @@ -268,6 +304,7 @@ MutexVector<T>::push_back(const T & t, bool lockMutex){ m_size++; if(lockMutex) unlock(); + return 0; } template<class T> @@ -315,10 +352,12 @@ MutexVector<T>::clear(bool l){ } template<class T> -void +int MutexVector<T>::fill(unsigned new_size, T & obj){ while(m_size <= new_size) - push_back(obj); + if (push_back(obj)) + return -1; + return 0; } #endif diff --git a/storage/ndb/include/util/ndb_opts.h b/storage/ndb/include/util/ndb_opts.h index 9cb65d4bc2e..f18bb9646cc 100644 --- a/storage/ndb/include/util/ndb_opts.h +++ b/storage/ndb/include/util/ndb_opts.h @@ -58,40 +58,40 @@ const char *opt_debug= 0; "Set connect string for connecting to ndb_mgmd. " \ "Syntax: \"[nodeid=<id>;][host=]<hostname>[:<port>]\". " \ "Overrides specifying entries in NDB_CONNECTSTRING and my.cnf", \ - (gptr*) &opt_ndb_connectstring, (gptr*) &opt_ndb_connectstring, \ + (uchar**) &opt_ndb_connectstring, (uchar**) &opt_ndb_connectstring, \ 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },\ { "ndb-mgmd-host", OPT_NDB_MGMD, \ "Set host and port for connecting to ndb_mgmd. 
" \ "Syntax: <hostname>[:<port>].", \ - (gptr*) &opt_ndb_mgmd, (gptr*) &opt_ndb_mgmd, 0, \ + (uchar**) &opt_ndb_mgmd, (uchar**) &opt_ndb_mgmd, 0, \ GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },\ { "ndb-nodeid", OPT_NDB_NODEID, \ "Set node id for this node.", \ - (gptr*) &opt_ndb_nodeid, (gptr*) &opt_ndb_nodeid, 0, \ + (uchar**) &opt_ndb_nodeid, (uchar**) &opt_ndb_nodeid, 0, \ GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },\ { "ndb-shm", OPT_NDB_SHM,\ "Allow optimizing using shared memory connections when available",\ - (gptr*) &opt_ndb_shm, (gptr*) &opt_ndb_shm, 0,\ + (uchar**) &opt_ndb_shm, (uchar**) &opt_ndb_shm, 0,\ GET_BOOL, NO_ARG, OPT_NDB_SHM_DEFAULT, 0, 0, 0, 0, 0 },\ {"ndb-optimized-node-selection", OPT_NDB_OPTIMIZED_NODE_SELECTION,\ "Select nodes for transactions in a more optimal way",\ - (gptr*) &opt_ndb_optimized_node_selection,\ - (gptr*) &opt_ndb_optimized_node_selection, 0,\ + (uchar**) &opt_ndb_optimized_node_selection,\ + (uchar**) &opt_ndb_optimized_node_selection, 0,\ GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0},\ { "connect-string", OPT_NDB_CONNECTSTRING, "same as --ndb-connectstring",\ - (gptr*) &opt_ndb_connectstring, (gptr*) &opt_ndb_connectstring, \ + (uchar**) &opt_ndb_connectstring, (uchar**) &opt_ndb_connectstring, \ 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },\ { "core-file", OPT_WANT_CORE, "Write core on errors.",\ - (gptr*) &opt_core, (gptr*) &opt_core, 0,\ + (uchar**) &opt_core, (uchar**) &opt_core, 0,\ GET_BOOL, NO_ARG, OPT_WANT_CORE_DEFAULT, 0, 0, 0, 0, 0},\ {"character-sets-dir", OPT_CHARSETS_DIR,\ - "Directory where character sets are.", (gptr*) &charsets_dir,\ - (gptr*) &charsets_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}\ + "Directory where character sets are.", (uchar**) &charsets_dir,\ + (uchar**) &charsets_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}\ #ifndef DBUG_OFF #define NDB_STD_OPTS(prog_name) \ { "debug", '#', "Output debug log. Often this is 'd:t:o,filename'.", \ - (gptr*) &opt_debug, (gptr*) &opt_debug, \ + (uchar**) &opt_debug, (uchar**) &opt_debug, \ 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0 }, \ NDB_STD_OPTS_COMMON #else diff --git a/storage/ndb/include/util/ndb_rand.h b/storage/ndb/include/util/ndb_rand.h new file mode 100644 index 00000000000..1521ca9c4ff --- /dev/null +++ b/storage/ndb/include/util/ndb_rand.h @@ -0,0 +1,33 @@ +/* Copyright (C) 2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#ifndef NDB_RAND_H +#define NDB_RAND_H + +#define NDB_RAND_MAX 32767 + +#ifdef __cplusplus +extern "C" { +#endif + +int ndb_rand(void); + +void ndb_srand(unsigned seed); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/storage/ndb/include/util/socket_io.h b/storage/ndb/include/util/socket_io.h index a988f4a1e8d..f76b6790b19 100644 --- a/storage/ndb/include/util/socket_io.h +++ b/storage/ndb/include/util/socket_io.h @@ -28,15 +28,20 @@ extern "C" { int read_socket(NDB_SOCKET_TYPE, int timeout_ms, char *, int len); - int readln_socket(NDB_SOCKET_TYPE socket, int timeout_millis, + int readln_socket(NDB_SOCKET_TYPE socket, int timeout_millis, int *time, char * buf, int buflen, NdbMutex *mutex); - int write_socket(NDB_SOCKET_TYPE, int timeout_ms, const char[], int len); - - int print_socket(NDB_SOCKET_TYPE, int timeout_ms, const char *, ...); - int println_socket(NDB_SOCKET_TYPE, int timeout_ms, const char *, ...); - int vprint_socket(NDB_SOCKET_TYPE, int timeout_ms, const char *, va_list); - int vprintln_socket(NDB_SOCKET_TYPE, int timeout_ms, const char *, va_list); + int write_socket(NDB_SOCKET_TYPE, int timeout_ms, int *time, + const char[], int len); + + int print_socket(NDB_SOCKET_TYPE, int timeout_ms, int *time, + const char *, ...); + int println_socket(NDB_SOCKET_TYPE, int timeout_ms, int *time, + const char *, ...); + int vprint_socket(NDB_SOCKET_TYPE, int timeout_ms, int *time, + const char *, va_list); + int vprintln_socket(NDB_SOCKET_TYPE, int timeout_ms, int *time, + const char *, va_list); #ifdef __cplusplus } diff --git a/storage/ndb/include/util/version.h b/storage/ndb/include/util/version.h index 42513d00442..9ea18ecd9d9 100644 --- a/storage/ndb/include/util/version.h +++ b/storage/ndb/include/util/version.h @@ -16,25 +16,18 @@ #ifndef VERSION_H #define VERSION_H -#include <ndb_types.h> +#include <ndb_version.h> + +/* some backwards compatible macros */ +#define MAKE_VERSION(A,B,C) NDB_MAKE_VERSION(A,B,C) +#define getMajor(a) ndbGetMajor(a) +#define getMinor(a) ndbGetMinor(a) +#define getBuild(a) ndbGetBuild(a) + #ifdef __cplusplus extern "C" { #endif - Uint32 getMajor(Uint32 version); - - Uint32 getMinor(Uint32 version); - - Uint32 getBuild(Uint32 version); - - Uint32 makeVersion(Uint32 major, Uint32 minor, Uint32 build); - - const char* getVersionString(Uint32 version, const char * status, - char *buf, unsigned sz); - - void ndbPrintVersion(); - Uint32 ndbGetOwnVersion(); - int ndbCompatible_mgmt_ndb(Uint32 ownVersion, Uint32 otherVersion); int ndbCompatible_ndb_mgmt(Uint32 ownVersion, Uint32 otherVersion); int ndbCompatible_mgmt_api(Uint32 ownVersion, Uint32 otherVersion); diff --git a/storage/ndb/ndbapi-examples/ndbapi_async1/ndbapi_async1.cpp b/storage/ndb/ndbapi-examples/ndbapi_async1/ndbapi_async1.cpp index a195a419aaf..1f19f36d674 100644 --- a/storage/ndb/ndbapi-examples/ndbapi_async1/ndbapi_async1.cpp +++ b/storage/ndb/ndbapi-examples/ndbapi_async1/ndbapi_async1.cpp @@ -75,7 +75,7 @@ int main(int argc, char** argv) exit(-1); } - if (cluster_connection->wait_until_ready(30,0)) + if (cluster_connection->wait_until_ready(30,0) < 0) { std::cout << "Cluster was not ready within 30 secs." 
<< std::endl; exit(-1); diff --git a/storage/ndb/ndbapi-examples/ndbapi_simple/ndbapi_simple.cpp b/storage/ndb/ndbapi-examples/ndbapi_simple/ndbapi_simple.cpp index 0a4f6d92f2c..4e82fc3e42b 100644 --- a/storage/ndb/ndbapi-examples/ndbapi_simple/ndbapi_simple.cpp +++ b/storage/ndb/ndbapi-examples/ndbapi_simple/ndbapi_simple.cpp @@ -281,12 +281,14 @@ static void do_read(Ndb &myNdb) if (myRecAttr == NULL) APIERROR(myTransaction->getNdbError()); if(myTransaction->execute( NdbTransaction::Commit ) == -1) - if (i == 3) { - std::cout << "Detected that deleted tuple doesn't exist!" << std::endl; - } else { - APIERROR(myTransaction->getNdbError()); - } + APIERROR(myTransaction->getNdbError()); + if (myTransaction->getNdbError().classification == NdbError::NoDataFound) + if (i == 3) + std::cout << "Detected that deleted tuple doesn't exist!" << std::endl; + else + APIERROR(myTransaction->getNdbError()); + if (i != 3) { printf(" %2d %2d\n", i, myRecAttr->u_32_value()); } diff --git a/storage/ndb/ndbapi-examples/ndbapi_simple_index/main.cpp b/storage/ndb/ndbapi-examples/ndbapi_simple_index/main.cpp index dae99642a24..440face79ae 100644 --- a/storage/ndb/ndbapi-examples/ndbapi_simple_index/main.cpp +++ b/storage/ndb/ndbapi-examples/ndbapi_simple_index/main.cpp @@ -19,6 +19,17 @@ // Correct output from this program is: // // ATTR1 ATTR2 +// 0 0 +// 1 1 +// 2 2 +// 3 3 +// 4 4 +// 5 5 +// 6 6 +// 7 7 +// 8 8 +// 9 9 +// ATTR1 ATTR2 // 0 10 // 1 1 // 2 12 @@ -166,7 +177,8 @@ int main(int argc, char** argv) NdbRecAttr *myRecAttr= myIndexOperation->getValue("ATTR1", NULL); if (myRecAttr == NULL) APIERROR(myTransaction->getNdbError()); - if(myTransaction->execute( NdbTransaction::Commit ) != -1) + if(myTransaction->execute( NdbTransaction::Commit, + NdbOperation::AbortOnError ) != -1) printf(" %2d %2d\n", myRecAttr->u_32_value(), i); myNdb->closeTransaction(myTransaction); @@ -232,7 +244,8 @@ int main(int argc, char** argv) NdbRecAttr *myRecAttr= myOperation->getValue("ATTR2", NULL); if (myRecAttr == NULL) APIERROR(myTransaction->getNdbError()); - if(myTransaction->execute( NdbTransaction::Commit ) == -1) + if(myTransaction->execute( NdbTransaction::Commit, + NdbOperation::AbortOnError ) == -1) if (i == 3) { std::cout << "Detected that deleted tuple doesn't exist!\n"; } else { diff --git a/storage/ndb/src/Makefile.am b/storage/ndb/src/Makefile.am index 3e91db21c4f..627347daf02 100644 --- a/storage/ndb/src/Makefile.am +++ b/storage/ndb/src/Makefile.am @@ -21,6 +21,8 @@ ndblib_LTLIBRARIES = libndbclient.la libndbclient_la_SOURCES = +libndbclient_la_LDFLAGS = -version-info @NDB_SHARED_LIB_VERSION@ @NDB_LD_VERSION_SCRIPT@ + libndbclient_la_LIBADD = \ ndbapi/libndbapi.la \ common/transporter/libtransporter.la \ diff --git a/storage/ndb/src/common/debugger/EventLogger.cpp b/storage/ndb/src/common/debugger/EventLogger.cpp index 6e446646898..068b0c6ac18 100644 --- a/storage/ndb/src/common/debugger/EventLogger.cpp +++ b/storage/ndb/src/common/debugger/EventLogger.cpp @@ -16,6 +16,7 @@ #include <ndb_global.h> #include <EventLogger.hpp> +#include <TransporterCallback.hpp> #include <NdbConfig.h> #include <kernel/BlockNumbers.h> @@ -497,10 +498,10 @@ void getTextTransReportCounters(QQQQ) { // ------------------------------------------------------------------- BaseString::snprintf(m_text, m_text_len, "Trans. 
Count = %u, Commit Count = %u, " - "Read Count = %u, Simple Read Count = %u,\n" + "Read Count = %u, Simple Read Count = %u, " "Write Count = %u, AttrInfo Count = %u, " - "Concurrent Operations = %u, Abort Count = %u\n" - " Scans: %u Range scans: %u", + "Concurrent Operations = %u, Abort Count = %u" + " Scans = %u Range scans = %u", theData[1], theData[2], theData[3], @@ -526,11 +527,102 @@ void getTextUndoLogBlocked(QQQQ) { theData[1], theData[2]); } + void getTextTransporterError(QQQQ) { - BaseString::snprintf(m_text, m_text_len, - "Transporter to node %d reported error 0x%x", - theData[1], - theData[2]); + struct myTransporterError{ + Uint32 errorNum; + char errorString[256]; + }; + int i = 0; + int length = 0; + static const struct myTransporterError TransporterErrorString[]= + { + //TE_NO_ERROR = 0 + {TE_NO_ERROR,"No error"}, + //TE_ERROR_CLOSING_SOCKET = 0x1 + {TE_ERROR_CLOSING_SOCKET,"Error found during closing of socket"}, + //TE_ERROR_IN_SELECT_BEFORE_ACCEPT = 0x2 + {TE_ERROR_IN_SELECT_BEFORE_ACCEPT,"Error found before accept. The transporter will retry"}, + //TE_INVALID_MESSAGE_LENGTH = 0x3 | TE_DO_DISCONNECT + {TE_INVALID_MESSAGE_LENGTH,"Error found in message (invalid message length)"}, + //TE_INVALID_CHECKSUM = 0x4 | TE_DO_DISCONNECT + {TE_INVALID_CHECKSUM,"Error found in message (checksum)"}, + //TE_COULD_NOT_CREATE_SOCKET = 0x5 + {TE_COULD_NOT_CREATE_SOCKET,"Error found while creating socket(can't create socket)"}, + //TE_COULD_NOT_BIND_SOCKET = 0x6 + {TE_COULD_NOT_BIND_SOCKET,"Error found while binding server socket"}, + //TE_LISTEN_FAILED = 0x7 + {TE_LISTEN_FAILED,"Error found while listening to server socket"}, + //TE_ACCEPT_RETURN_ERROR = 0x8 + {TE_ACCEPT_RETURN_ERROR,"Error found during accept(accept return error)"}, + //TE_SHM_DISCONNECT = 0xb | TE_DO_DISCONNECT + {TE_SHM_DISCONNECT,"The remote node has disconnected"}, + //TE_SHM_IPC_STAT = 0xc | TE_DO_DISCONNECT + {TE_SHM_IPC_STAT,"Unable to check shm segment"}, + //TE_SHM_UNABLE_TO_CREATE_SEGMENT = 0xd + {TE_SHM_UNABLE_TO_CREATE_SEGMENT,"Unable to create shm segment"}, + //TE_SHM_UNABLE_TO_ATTACH_SEGMENT = 0xe + {TE_SHM_UNABLE_TO_ATTACH_SEGMENT,"Unable to attach shm segment"}, + //TE_SHM_UNABLE_TO_REMOVE_SEGMENT = 0xf + {TE_SHM_UNABLE_TO_REMOVE_SEGMENT,"Unable to remove shm segment"}, + //TE_TOO_SMALL_SIGID = 0x10 + {TE_TOO_SMALL_SIGID,"Sig ID too small"}, + //TE_TOO_LARGE_SIGID = 0x11 + {TE_TOO_LARGE_SIGID,"Sig ID too large"}, + //TE_WAIT_STACK_FULL = 0x12 | TE_DO_DISCONNECT + {TE_WAIT_STACK_FULL,"Wait stack was full"}, + //TE_RECEIVE_BUFFER_FULL = 0x13 | TE_DO_DISCONNECT + {TE_RECEIVE_BUFFER_FULL,"Receive buffer was full"}, + //TE_SIGNAL_LOST_SEND_BUFFER_FULL = 0x14 | TE_DO_DISCONNECT + {TE_SIGNAL_LOST_SEND_BUFFER_FULL,"Send buffer was full, and trying to force send fails"}, + //TE_SIGNAL_LOST = 0x15 + {TE_SIGNAL_LOST,"Send failed for unknown reason(signal lost)"}, + //TE_SEND_BUFFER_FULL = 0x16 + {TE_SEND_BUFFER_FULL,"The send buffer was full, but sleeping for a while solved"}, + //TE_SCI_LINK_ERROR = 0x0017 + {TE_SCI_LINK_ERROR,"There is no link from this node to the switch"}, + //TE_SCI_UNABLE_TO_START_SEQUENCE = 0x18 | TE_DO_DISCONNECT + {TE_SCI_UNABLE_TO_START_SEQUENCE,"Could not start a sequence, because system resources are exhausted or no sequence has been created"}, + //TE_SCI_UNABLE_TO_REMOVE_SEQUENCE = 0x19 | TE_DO_DISCONNECT + {TE_SCI_UNABLE_TO_REMOVE_SEQUENCE,"Could not remove a sequence"}, + //TE_SCI_UNABLE_TO_CREATE_SEQUENCE = 0x1a | TE_DO_DISCONNECT + {TE_SCI_UNABLE_TO_CREATE_SEQUENCE,"Could not create a
sequence, because system resources are exhausted. Must reboot"}, + //TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR = 0x1b | TE_DO_DISCONNECT + {TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR,"Tried to send data on redundant link but failed"}, + //TE_SCI_CANNOT_INIT_LOCALSEGMENT = 0x1c | TE_DO_DISCONNECT + {TE_SCI_CANNOT_INIT_LOCALSEGMENT,"Cannot initialize local segment"}, + //TE_SCI_CANNOT_MAP_REMOTESEGMENT = 0x1d | TE_DO_DISCONNECT + {TE_SCI_CANNOT_MAP_REMOTESEGMENT,"Cannot map remote segment"}, + //TE_SCI_UNABLE_TO_UNMAP_SEGMENT = 0x1e | TE_DO_DISCONNECT + {TE_SCI_UNABLE_TO_UNMAP_SEGMENT,"Cannot free the resources used by this segment (step 1)"}, + //TE_SCI_UNABLE_TO_REMOVE_SEGMENT = 0x1f | TE_DO_DISCONNECT + {TE_SCI_UNABLE_TO_REMOVE_SEGMENT,"Cannot free the resources used by this segment (step 2)"}, + //TE_SCI_UNABLE_TO_DISCONNECT_SEGMENT = 0x20 | TE_DO_DISCONNECT + {TE_SCI_UNABLE_TO_DISCONNECT_SEGMENT,"Cannot disconnect from a remote segment"}, + //TE_SHM_IPC_PERMANENT = 0x21 + {TE_SHM_IPC_PERMANENT,"Shm ipc Permanent error"}, + //TE_SCI_UNABLE_TO_CLOSE_CHANNEL = 0x22 + {TE_SCI_UNABLE_TO_CLOSE_CHANNEL,"Unable to close the sci channel and the resources allocated"} + }; + + length = sizeof(TransporterErrorString)/sizeof(struct myTransporterError); + for(i=0; i<length; i++) + { + if(theData[2] == (Uint32) TransporterErrorString[i].errorNum) + { + BaseString::snprintf(m_text, m_text_len, + "Transporter to node %d reported error 0x%x: %s", + theData[1], + theData[2], + TransporterErrorString[i].errorString); + break; + } + } + if(i == length) + BaseString::snprintf(m_text, m_text_len, + "Transporter to node %d reported error 0x%x: unknown error", + theData[1], + theData[2]); } void getTextTransporterWarning(QQQQ) { getTextTransporterError(m_text, m_text_len, theData); @@ -705,9 +797,9 @@ void getTextBackupFailedToStart(QQQQ) { } void getTextBackupCompleted(QQQQ) { BaseString::snprintf(m_text, m_text_len, - "Backup %u started from node %u completed\n" - " StartGCP: %u StopGCP: %u\n" - " #Records: %u #LogRecords: %u\n" + "Backup %u started from node %u completed."
+ " StartGCP: %u StopGCP: %u" + " #Records: %u #LogRecords: %u" " Data: %u bytes Log: %u bytes", theData[2], refToNode(theData[1]), theData[3], theData[4], theData[6], theData[8], @@ -950,6 +1042,8 @@ EventLogger::close() removeAllHandlers(); } +#ifdef NOT_USED + static NdbOut& operator<<(NdbOut& out, const LogLevel & ll) { @@ -959,6 +1053,7 @@ operator<<(NdbOut& out, const LogLevel & ll) out << "]"; return out; } +#endif int EventLoggerBase::event_lookup(int eventType, diff --git a/storage/ndb/src/common/debugger/signaldata/DictTabInfo.cpp b/storage/ndb/src/common/debugger/signaldata/DictTabInfo.cpp index 89f9db835a1..dae5527dc5e 100644 --- a/storage/ndb/src/common/debugger/signaldata/DictTabInfo.cpp +++ b/storage/ndb/src/common/debugger/signaldata/DictTabInfo.cpp @@ -67,6 +67,7 @@ DictTabInfo::TableMapping[] = { DTIMAP(Table, MaxRowsHigh, MaxRowsHigh), DTIMAP(Table, MinRowsLow, MinRowsLow), DTIMAP(Table, MinRowsHigh, MinRowsHigh), + DTIMAP(Table, SingleUserMode, SingleUserMode), DTIBREAK(AttributeName) }; @@ -166,6 +167,8 @@ DictTabInfo::Table::init(){ MaxRowsHigh = 0; MinRowsLow = 0; MinRowsHigh = 0; + + SingleUserMode = 0; } void diff --git a/storage/ndb/src/common/debugger/signaldata/SignalNames.cpp b/storage/ndb/src/common/debugger/signaldata/SignalNames.cpp index 8530187963d..0d31cd5de7f 100644 --- a/storage/ndb/src/common/debugger/signaldata/SignalNames.cpp +++ b/storage/ndb/src/common/debugger/signaldata/SignalNames.cpp @@ -350,15 +350,10 @@ const GsnName SignalNames [] = { ,{ GSN_TUP_WRITELOG_REQ, "TUP_WRITELOG_REQ" } ,{ GSN_LQH_WRITELOG_REQ, "LQH_WRITELOG_REQ" } - ,{ GSN_STATISTICS_REQ, "STATISTICS_REQ" } ,{ GSN_START_ORD, "START_ORD" } ,{ GSN_STOP_ORD, "STOP_ORD" } ,{ GSN_TAMPER_ORD, "TAMPER_ORD" } - ,{ GSN_SET_VAR_REQ, "SET_VAR_REQ" } - ,{ GSN_SET_VAR_CONF, "SET_VAR_CONF" } - ,{ GSN_SET_VAR_REF, "SET_VAR_REF" } - ,{ GSN_STATISTICS_CONF, "STATISTICS_CONF" } - + ,{ GSN_EVENT_SUBSCRIBE_REQ, "EVENT_SUBSCRIBE_REQ" } ,{ GSN_EVENT_SUBSCRIBE_CONF, "EVENT_SUBSCRIBE_CONF" } ,{ GSN_EVENT_SUBSCRIBE_REF, "EVENT_SUBSCRIBE_REF" } @@ -626,6 +621,14 @@ const GsnName SignalNames [] = { ,{ GSN_LCP_PREPARE_REF, "LCP_PREPARE_REF" } ,{ GSN_LCP_PREPARE_CONF, "LCP_PREPARE_CONF" } + ,{ GSN_DICT_ABORT_REQ, "DICT_ABORT_REQ" } + ,{ GSN_DICT_ABORT_REF, "DICT_ABORT_REF" } + ,{ GSN_DICT_ABORT_CONF, "DICT_ABORT_CONF" } + + ,{ GSN_DICT_COMMIT_REQ, "DICT_COMMIT_REQ" } + ,{ GSN_DICT_COMMIT_REF, "DICT_COMMIT_REF" } + ,{ GSN_DICT_COMMIT_CONF, "DICT_COMMIT_CONF" } + /* DICT LOCK */ ,{ GSN_DICT_LOCK_REQ, "DICT_LOCK_REQ" } ,{ GSN_DICT_LOCK_CONF, "DICT_LOCK_CONF" } diff --git a/storage/ndb/src/common/debugger/signaldata/TcKeyConf.cpp b/storage/ndb/src/common/debugger/signaldata/TcKeyConf.cpp index 65589f8cd6e..377863f9446 100644 --- a/storage/ndb/src/common/debugger/signaldata/TcKeyConf.cpp +++ b/storage/ndb/src/common/debugger/signaldata/TcKeyConf.cpp @@ -51,11 +51,11 @@ printTCKEYCONF(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receive (TcKeyConf::getMarkerFlag(confInfo) == 0)?"false":"true"); fprintf(output, "Operations:\n"); for(i = 0; i < noOfOp; i++) { - if(sig->operations[i].attrInfoLen > TcKeyConf::SimpleReadBit) + if(sig->operations[i].attrInfoLen > TcKeyConf::DirtyReadBit) fprintf(output, " apiOperationPtr: H'%.8x, simplereadnode: %u\n", sig->operations[i].apiOperationPtr, - sig->operations[i].attrInfoLen & (~TcKeyConf::SimpleReadBit)); + sig->operations[i].attrInfoLen & (~TcKeyConf::DirtyReadBit)); else fprintf(output, " apiOperationPtr: H'%.8x, attrInfoLen: %u\n", diff --git 
a/storage/ndb/src/common/mgmcommon/ConfigRetriever.cpp b/storage/ndb/src/common/mgmcommon/ConfigRetriever.cpp index 0521c01248a..35b1a91e9da 100644 --- a/storage/ndb/src/common/mgmcommon/ConfigRetriever.cpp +++ b/storage/ndb/src/common/mgmcommon/ConfigRetriever.cpp @@ -45,7 +45,8 @@ ConfigRetriever::ConfigRetriever(const char * _connect_string, Uint32 version, Uint32 node_type, - const char * _bindaddress) + const char * _bindaddress, + int timeout_ms) { DBUG_ENTER("ConfigRetriever::ConfigRetriever"); @@ -61,6 +62,8 @@ ConfigRetriever::ConfigRetriever(const char * _connect_string, DBUG_VOID_RETURN; } + ndb_mgm_set_timeout(m_handle, timeout_ms); + if (ndb_mgm_set_connectstring(m_handle, _connect_string)) { BaseString tmp(ndb_mgm_get_latest_error_msg(m_handle)); @@ -157,14 +160,15 @@ ConfigRetriever::getConfig() { } ndb_mgm_configuration * -ConfigRetriever::getConfig(NdbMgmHandle m_handle) +ConfigRetriever::getConfig(NdbMgmHandle m_handle_arg) { - ndb_mgm_configuration * conf = ndb_mgm_get_configuration(m_handle,m_version); + ndb_mgm_configuration * conf = ndb_mgm_get_configuration(m_handle_arg, + m_version); if(conf == 0) { - BaseString tmp(ndb_mgm_get_latest_error_msg(m_handle)); + BaseString tmp(ndb_mgm_get_latest_error_msg(m_handle_arg)); tmp.append(" : "); - tmp.append(ndb_mgm_get_latest_error_desc(m_handle)); + tmp.append(ndb_mgm_get_latest_error_desc(m_handle_arg)); setError(CR_ERROR, tmp.c_str()); return 0; } diff --git a/storage/ndb/src/common/portlib/NdbTick.c b/storage/ndb/src/common/portlib/NdbTick.c index f69c42c0ca0..7e54984794f 100644 --- a/storage/ndb/src/common/portlib/NdbTick.c +++ b/storage/ndb/src/common/portlib/NdbTick.c @@ -60,9 +60,9 @@ NdbTick_CurrentMicrosecond(NDB_TICKS * secs, Uint32 * micros){ int res = gettimeofday(&tick_time, 0); if(secs==0) { - NDB_TICKS secs = tick_time.tv_sec; + NDB_TICKS local_secs = tick_time.tv_sec; *micros = tick_time.tv_usec; - *micros = secs*1000000+*micros; + *micros = local_secs*1000000+*micros; } else { * secs = tick_time.tv_sec; * micros = tick_time.tv_usec; diff --git a/storage/ndb/src/common/transporter/SCI_Transporter.cpp b/storage/ndb/src/common/transporter/SCI_Transporter.cpp index 138b79acb51..0720fe84973 100644 --- a/storage/ndb/src/common/transporter/SCI_Transporter.cpp +++ b/storage/ndb/src/common/transporter/SCI_Transporter.cpp @@ -65,13 +65,10 @@ SCI_Transporter::SCI_Transporter(TransporterRegistry &t_reg, m_initLocal=false; - m_swapCounter=0; m_failCounter=0; m_remoteNodes[0]=remoteSciNodeId0; m_remoteNodes[1]=remoteSciNodeId1; m_adapters = nAdapters; - // The maximum number of times to try and create, - // start and destroy a sequence m_ActiveAdapterId=0; m_StandbyAdapterId=1; @@ -102,8 +99,6 @@ SCI_Transporter::SCI_Transporter(TransporterRegistry &t_reg, DBUG_VOID_RETURN; } - - void SCI_Transporter::disconnectImpl() { DBUG_ENTER("SCI_Transporter::disconnectImpl"); @@ -129,7 +124,8 @@ void SCI_Transporter::disconnectImpl() if(err != SCI_ERR_OK) { report_error(TE_SCI_UNABLE_TO_CLOSE_CHANNEL); - DBUG_PRINT("error", ("Cannot close channel to the driver. Error code 0x%x", + DBUG_PRINT("error", + ("Cannot close channel to the driver. 
Error code 0x%x", err)); } } @@ -164,19 +160,18 @@ bool SCI_Transporter::initTransporter() { m_sendBuffer.m_buffer = new Uint32[m_sendBuffer.m_sendBufferSize / 4]; m_sendBuffer.m_dataSize = 0; - DBUG_PRINT("info", ("Created SCI Send Buffer with buffer size %d and packet size %d", + DBUG_PRINT("info", + ("Created SCI Send Buffer with buffer size %d and packet size %d", m_sendBuffer.m_sendBufferSize, m_PacketSize * 4)); if(!getLinkStatus(m_ActiveAdapterId) || (m_adapters > 1 && !getLinkStatus(m_StandbyAdapterId))) { - DBUG_PRINT("error", ("The link is not fully operational. Check the cables and the switches")); - //reportDisconnect(remoteNodeId, 0); - //doDisconnect(); + DBUG_PRINT("error", + ("The link is not fully operational. Check the cables and the switches")); //NDB should terminate report_error(TE_SCI_LINK_ERROR); DBUG_RETURN(false); } - DBUG_RETURN(true); } // initTransporter() @@ -235,7 +230,8 @@ sci_error_t SCI_Transporter::initLocalSegment() { DBUG_PRINT("info", ("SCInode iD %d adapter %d\n", sciAdapters[i].localSciNodeId, i)); if(err != SCI_ERR_OK) { - DBUG_PRINT("error", ("Cannot open an SCI virtual device. Error code 0x%x", + DBUG_PRINT("error", + ("Cannot open an SCI virtual device. Error code 0x%x", err)); DBUG_RETURN(err); } @@ -269,7 +265,8 @@ sci_error_t SCI_Transporter::initLocalSegment() { &err); if(err != SCI_ERR_OK) { - DBUG_PRINT("error", ("Local Segment is not accessible by an SCI adapter. Error code 0x%x\n", + DBUG_PRINT("error", + ("Local Segment is not accessible by an SCI adapter. Error code 0x%x\n", err)); DBUG_RETURN(err); } @@ -303,15 +300,13 @@ sci_error_t SCI_Transporter::initLocalSegment() { &err); if(err != SCI_ERR_OK) { - DBUG_PRINT("error", ("Local Segment is not available for remote connections. Error code 0x%x\n", + DBUG_PRINT("error", + ("Local Segment is not available for remote connections. 
Error code 0x%x\n", err)); DBUG_RETURN(err); } } - - setupLocalSegment(); - DBUG_RETURN(err); } // initLocalSegment() @@ -343,12 +338,6 @@ bool SCI_Transporter::doSend() { if(sizeToSend==4097) i4097++; #endif - if(startSequence(m_ActiveAdapterId)!=SCI_ERR_OK) { - DBUG_PRINT("error", ("Start sequence failed")); - report_error(TE_SCI_UNABLE_TO_START_SEQUENCE); - return false; - } - tryagain: retry++; @@ -374,119 +363,36 @@ bool SCI_Transporter::doSend() { SCI_FLAG_ERROR_CHECK, &err); - if (err != SCI_ERR_OK) { - if(err == SCI_ERR_OUT_OF_RANGE) { - DBUG_PRINT("error", ("Data transfer : out of range error")); - goto tryagain; - } - if(err == SCI_ERR_SIZE_ALIGNMENT) { - DBUG_PRINT("error", ("Data transfer : alignment error")); - DBUG_PRINT("info", ("sendPtr 0x%x, sizeToSend = %d", sendPtr, sizeToSend)); - goto tryagain; - } - if(err == SCI_ERR_OFFSET_ALIGNMENT) { - DBUG_PRINT("error", ("Data transfer : offset alignment")); - goto tryagain; - } - if(err == SCI_ERR_TRANSFER_FAILED) { - //(m_TargetSegm[m_StandbyAdapterId].writer)->heavyLock(); - if(getLinkStatus(m_ActiveAdapterId)) { - goto tryagain; - } - if (m_adapters == 1) { - DBUG_PRINT("error", ("SCI Transfer failed")); + if (err == SCI_ERR_OUT_OF_RANGE || + err == SCI_ERR_SIZE_ALIGNMENT || + err == SCI_ERR_OFFSET_ALIGNMENT) { + DBUG_PRINT("error", ("Data transfer error = %d", err)); report_error(TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR); return false; - } - m_failCounter++; - Uint32 temp=m_ActiveAdapterId; - switch(m_swapCounter) { - case 0: - /**swap from active (0) to standby (1)*/ - if(getLinkStatus(m_StandbyAdapterId)) { - DBUG_PRINT("error", ("Swapping from adapter 0 to 1")); + } + if(err == SCI_ERR_TRANSFER_FAILED) { + if(getLinkStatus(m_ActiveAdapterId)) + goto tryagain; + if (m_adapters == 1) { + DBUG_PRINT("error", ("SCI Transfer failed")); + report_error(TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR); + return false; + } + m_failCounter++; + Uint32 temp=m_ActiveAdapterId; + if (getLinkStatus(m_StandbyAdapterId)) { failoverShmWriter(); SCIStoreBarrier(m_TargetSegm[m_StandbyAdapterId].sequence,0); m_ActiveAdapterId=m_StandbyAdapterId; m_StandbyAdapterId=temp; - SCIRemoveSequence((m_TargetSegm[m_StandbyAdapterId].sequence), - FLAGS, - &err); - if(err!=SCI_ERR_OK) { - report_error(TE_SCI_UNABLE_TO_REMOVE_SEQUENCE); - DBUG_PRINT("error", ("Unable to remove sequence")); - return false; - } - if(startSequence(m_ActiveAdapterId)!=SCI_ERR_OK) { - DBUG_PRINT("error", ("Start sequence failed")); - report_error(TE_SCI_UNABLE_TO_START_SEQUENCE); - return false; - } - m_swapCounter++; - DBUG_PRINT("info", ("failover complete")); - goto tryagain; - } else { - report_error(TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR); - DBUG_PRINT("error", ("SCI Transfer failed")); - return false; - } - return false; - break; - case 1: - /** swap back from 1 to 0 - must check that the link is up */ - - if(getLinkStatus(m_StandbyAdapterId)) { - failoverShmWriter(); - m_ActiveAdapterId=m_StandbyAdapterId; - m_StandbyAdapterId=temp; - DBUG_PRINT("info", ("Swapping from 1 to 0")); - if(createSequence(m_ActiveAdapterId)!=SCI_ERR_OK) { - DBUG_PRINT("error", ("Unable to create sequence")); - report_error(TE_SCI_UNABLE_TO_CREATE_SEQUENCE); - return false; - } - if(startSequence(m_ActiveAdapterId)!=SCI_ERR_OK) { - DBUG_PRINT("error", ("startSequence failed... 
disconnecting")); - report_error(TE_SCI_UNABLE_TO_START_SEQUENCE); - return false; - } - - SCIRemoveSequence((m_TargetSegm[m_StandbyAdapterId].sequence) - , FLAGS, - &err); - if(err!=SCI_ERR_OK) { - DBUG_PRINT("error", ("Unable to remove sequence")); - report_error(TE_SCI_UNABLE_TO_REMOVE_SEQUENCE); - return false; - } - - if(createSequence(m_StandbyAdapterId)!=SCI_ERR_OK) { - DBUG_PRINT("error", ("Unable to create sequence on standby")); - report_error(TE_SCI_UNABLE_TO_CREATE_SEQUENCE); - return false; - } - - m_swapCounter=0; - - DBUG_PRINT("info", ("failover complete..")); - goto tryagain; - + DBUG_PRINT("error", ("Swapping from adapter %u to %u", + m_StandbyAdapterId, m_ActiveAdapterId)); } else { - DBUG_PRINT("error", ("Unrecoverable data transfer error")); report_error(TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR); - return false; + DBUG_PRINT("error", ("SCI Transfer failed")); } - - break; - default: - DBUG_PRINT("error", ("Unrecoverable data transfer error")); - report_error(TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR); - return false; - break; - } - } + } } else { SHM_Writer * writer = (m_TargetSegm[m_ActiveAdapterId].writer); writer->updateWritePtr(sizeToSend); @@ -497,7 +403,6 @@ bool SCI_Transporter::doSend() { m_sendBuffer.m_dataSize = 0; m_sendBuffer.m_forceSendLimit = sendLimit; } - } else { /** * If we end up here, the SCI segment is full. @@ -552,15 +457,12 @@ void SCI_Transporter::setupLocalSegment() DBUG_VOID_RETURN; } //setupLocalSegment - - void SCI_Transporter::setupRemoteSegment() { DBUG_ENTER("SCI_Transporter::setupRemoteSegment"); Uint32 sharedSize = 0; sharedSize =4096; //start of the buffer is page aligned - Uint32 sizeOfBuffer = m_BufferSize; const Uint32 slack = MAX_MESSAGE_SIZE; sizeOfBuffer -= sharedSize; @@ -666,7 +568,6 @@ SCI_Transporter::init_remote() DBUG_PRINT("error", ("Error connecting segment, err 0x%x", err)); DBUG_RETURN(false); } - } // Map the remote memory segment into program space for(Uint32 i=0; i < m_adapters ; i++) { @@ -679,13 +580,14 @@ SCI_Transporter::init_remote() FLAGS, &err); - - if(err!= SCI_ERR_OK) { - DBUG_PRINT("error", ("Cannot map a segment to the remote node %d. Error code 0x%x",m_RemoteSciNodeId, err)); - //NDB SHOULD TERMINATE AND COMPUTER REBOOTED! - report_error(TE_SCI_CANNOT_MAP_REMOTESEGMENT); - DBUG_RETURN(false); - } + if(err!= SCI_ERR_OK) { + DBUG_PRINT("error", + ("Cannot map a segment to the remote node %d. Error code 0x%x", + m_RemoteSciNodeId, err)); + //NDB SHOULD TERMINATE AND COMPUTER REBOOTED! + report_error(TE_SCI_CANNOT_MAP_REMOTESEGMENT); + DBUG_RETURN(false); + } } m_mapped=true; setupRemoteSegment(); @@ -713,7 +615,6 @@ SCI_Transporter::connect_client_impl(NDB_SOCKET_TYPE sockfd) NDB_CLOSE_SOCKET(sockfd); DBUG_RETURN(false); } - if (!init_local()) { NDB_CLOSE_SOCKET(sockfd); DBUG_RETURN(false); @@ -788,29 +689,9 @@ sci_error_t SCI_Transporter::createSequence(Uint32 adapterid) { &(m_TargetSegm[adapterid].sequence), SCI_FLAG_FAST_BARRIER, &err); - - return err; } // createSequence() - -sci_error_t SCI_Transporter::startSequence(Uint32 adapterid) { - - sci_error_t err; - /** Perform preliminary error check on an SCI adapter before starting a - * sequence of read and write operations on the mapped segment. 
- */ - m_SequenceStatus = SCIStartSequence( - (m_TargetSegm[adapterid].sequence), - FLAGS, &err); - - - // If there still is an error then data cannot be safely send - return err; -} // startSequence() - - - bool SCI_Transporter::disconnectLocal() { DBUG_ENTER("SCI_Transporter::disconnectLocal"); @@ -878,9 +759,6 @@ SCI_Transporter::~SCI_Transporter() { DBUG_VOID_RETURN; } // ~SCI_Transporter() - - - void SCI_Transporter::closeSCI() { // Termination of SCI sci_error_t err; @@ -897,8 +775,9 @@ void SCI_Transporter::closeSCI() { SCIClose(activeSCIDescriptor, FLAGS, &err); if(err != SCI_ERR_OK) { - DBUG_PRINT("error", ("Cannot close SCI channel to the driver. Error code 0x%x", - err)); + DBUG_PRINT("error", + ("Cannot close SCI channel to the driver. Error code 0x%x", + err)); } SCITerminate(); DBUG_VOID_RETURN; @@ -973,7 +852,6 @@ SCI_Transporter::getConnectionStatus() { return false; } - void SCI_Transporter::setConnected() { *m_remoteStatusFlag = SCICONNECTED; @@ -983,7 +861,6 @@ SCI_Transporter::setConnected() { *m_localStatusFlag = SCICONNECTED; } - void SCI_Transporter::setDisconnect() { if(getLinkStatus(m_ActiveAdapterId)) @@ -994,7 +871,6 @@ SCI_Transporter::setDisconnect() { } } - bool SCI_Transporter::checkConnected() { if (*m_localStatusFlag == SCIDISCONNECT) { @@ -1015,8 +891,9 @@ SCI_Transporter::initSCI() { SCIInitialize(0, &error); if(error != SCI_ERR_OK) { DBUG_PRINT("error", ("Cannot initialize SISCI library.")); - DBUG_PRINT("error", ("Inconsistency between SISCI library and SISCI driver. Error code 0x%x", - error)); + DBUG_PRINT("error", + ("Inconsistency between SISCI library and SISCI driver. Error code 0x%x", + error)); DBUG_RETURN(false); } init = true; @@ -1029,3 +906,4 @@ SCI_Transporter::get_free_buffer() const { return (m_TargetSegm[m_ActiveAdapterId].writer)->get_free_buffer(); } + diff --git a/storage/ndb/src/common/transporter/SCI_Transporter.hpp b/storage/ndb/src/common/transporter/SCI_Transporter.hpp index fbba2ac4516..f774186f238 100644 --- a/storage/ndb/src/common/transporter/SCI_Transporter.hpp +++ b/storage/ndb/src/common/transporter/SCI_Transporter.hpp @@ -54,12 +54,12 @@ * local segment, the SCI transporter connects to a segment created by another * transporter at a remote node, and the maps the remote segment into its * virtual address space. However, since NDB Cluster relies on redundancy - * at the network level, by using dual SCI adapters communica - * + * at the network level, by using dual SCI adapters communication can be + * maintained even if one of the adapter cards fails (or anything on the + * network this adapter card exists in e.g. an SCI switch failure). * */ - /** * class SCITransporter * @brief - main class for the SCI transporter. @@ -84,16 +84,6 @@ public: sci_error_t createSequence(Uint32 adapterid); - /** - * starts a sequence for error checking. - * The actual checking that a sequence is correct is done implicitly - * in SCIMemCpy (in doSend). - * @param adapterid the adapter on which to start the sequence. - * @return SCI_ERR_OK if ok, otherwize something else. - */ - sci_error_t startSequence(Uint32 adapterid); - - /** Initiate Local Segment: create a memory segment, * prepare a memory segment, map the local segment * into memory space and make segment available. 
@@ -159,7 +149,6 @@ private: bool m_mapped; bool m_initLocal; bool m_sciinit; - Uint32 m_swapCounter; Uint32 m_failCounter; /** * For statistics on transfered packets @@ -195,7 +184,6 @@ private: */ Uint32 m_reportFreq; - Uint32 m_adapters; Uint32 m_numberOfRemoteNodes; diff --git a/storage/ndb/src/common/transporter/SHM_Transporter.cpp b/storage/ndb/src/common/transporter/SHM_Transporter.cpp index e0c2e726a92..3ce21940254 100644 --- a/storage/ndb/src/common/transporter/SHM_Transporter.cpp +++ b/storage/ndb/src/common/transporter/SHM_Transporter.cpp @@ -31,7 +31,7 @@ SHM_Transporter::SHM_Transporter(TransporterRegistry &t_reg, const char *lHostName, const char *rHostName, int r_port, - bool isMgmConnection, + bool isMgmConnection_arg, NodeId lNodeId, NodeId rNodeId, NodeId serverNodeId, @@ -40,7 +40,7 @@ SHM_Transporter::SHM_Transporter(TransporterRegistry &t_reg, key_t _shmKey, Uint32 _shmSize) : Transporter(t_reg, tt_SHM_TRANSPORTER, - lHostName, rHostName, r_port, isMgmConnection, + lHostName, rHostName, r_port, isMgmConnection_arg, lNodeId, rNodeId, serverNodeId, 0, false, checksum, signalId), shmKey(_shmKey), diff --git a/storage/ndb/src/common/transporter/TCP_Transporter.cpp b/storage/ndb/src/common/transporter/TCP_Transporter.cpp index 9cf5bef35e0..298e43710b0 100644 --- a/storage/ndb/src/common/transporter/TCP_Transporter.cpp +++ b/storage/ndb/src/common/transporter/TCP_Transporter.cpp @@ -64,14 +64,14 @@ TCP_Transporter::TCP_Transporter(TransporterRegistry &t_reg, const char *lHostName, const char *rHostName, int r_port, - bool isMgmConnection, + bool isMgmConnection_arg, NodeId lNodeId, NodeId rNodeId, NodeId serverNodeId, bool chksm, bool signalId, Uint32 _reportFreq) : Transporter(t_reg, tt_TCP_TRANSPORTER, - lHostName, rHostName, r_port, isMgmConnection, + lHostName, rHostName, r_port, isMgmConnection_arg, lNodeId, rNodeId, serverNodeId, 0, false, chksm, signalId), m_sendBuffer(sendBufSize) @@ -152,6 +152,8 @@ TCP_Transporter::initTransporter() { void TCP_Transporter::setSocketOptions(){ + int sockOptKeepAlive = 1; + if (setsockopt(theSocket, SOL_SOCKET, SO_RCVBUF, (char*)&sockOptRcvBufSize, sizeof(sockOptRcvBufSize)) < 0) { #ifdef DEBUG_TRANSPORTER @@ -166,6 +168,11 @@ TCP_Transporter::setSocketOptions(){ #endif }//if + if (setsockopt(theSocket, SOL_SOCKET, SO_KEEPALIVE, + (char*)&sockOptKeepAlive, sizeof(sockOptKeepAlive)) < 0) { + ndbout_c("The setsockopt SO_KEEPALIVE error code = %d", InetErrno); + }//if + //----------------------------------------------- // Set the TCP_NODELAY option so also small packets are sent // as soon as possible diff --git a/storage/ndb/src/common/transporter/TCP_Transporter.hpp b/storage/ndb/src/common/transporter/TCP_Transporter.hpp index 231732aea19..ed1a154c944 100644 --- a/storage/ndb/src/common/transporter/TCP_Transporter.hpp +++ b/storage/ndb/src/common/transporter/TCP_Transporter.hpp @@ -33,6 +33,7 @@ struct ReceiveBuffer { Uint32 sizeOfData; // In bytes Uint32 sizeOfBuffer; + ReceiveBuffer() {} bool init(int bytes); void destroy(); diff --git a/storage/ndb/src/common/transporter/Transporter.cpp b/storage/ndb/src/common/transporter/Transporter.cpp index 20b6be8ce26..cec018575e0 100644 --- a/storage/ndb/src/common/transporter/Transporter.cpp +++ b/storage/ndb/src/common/transporter/Transporter.cpp @@ -107,7 +107,7 @@ Transporter::connect_server(NDB_SOCKET_TYPE sockfd) { { struct sockaddr_in addr; SOCKET_SIZE_TYPE addrlen= sizeof(addr); - int r= getpeername(sockfd, (struct sockaddr*)&addr, &addrlen); + getpeername(sockfd, (struct 
sockaddr*)&addr, &addrlen); m_connect_address= (&addr)->sin_addr; } @@ -213,7 +213,7 @@ Transporter::connect_client(NDB_SOCKET_TYPE sockfd) { { struct sockaddr_in addr; SOCKET_SIZE_TYPE addrlen= sizeof(addr); - int r= getpeername(sockfd, (struct sockaddr*)&addr, &addrlen); + getpeername(sockfd, (struct sockaddr*)&addr, &addrlen); m_connect_address= (&addr)->sin_addr; } diff --git a/storage/ndb/src/common/transporter/TransporterRegistry.cpp b/storage/ndb/src/common/transporter/TransporterRegistry.cpp index 3e8cb4623e3..5f5f3c17b2d 100644 --- a/storage/ndb/src/common/transporter/TransporterRegistry.cpp +++ b/storage/ndb/src/common/transporter/TransporterRegistry.cpp @@ -119,6 +119,7 @@ void TransporterRegistry::set_mgm_handle(NdbMgmHandle h) if (m_mgm_handle) ndb_mgm_destroy_handle(&m_mgm_handle); m_mgm_handle= h; + ndb_mgm_set_timeout(m_mgm_handle, 5000); #ifndef DBUG_OFF if (h) { @@ -1063,9 +1064,16 @@ TransporterRegistry::update_connections() void TransporterRegistry::start_clients_thread() { + int persist_mgm_count= 0; DBUG_ENTER("TransporterRegistry::start_clients_thread"); while (m_run_start_clients_thread) { NdbSleep_MilliSleep(100); + persist_mgm_count++; + if(persist_mgm_count==50) + { + ndb_mgm_check_connection(m_mgm_handle); + persist_mgm_count= 0; + } for (int i= 0, n= 0; n < nTransporters && m_run_start_clients_thread; i++){ Transporter * t = theTransporters[i]; if (!t) @@ -1123,7 +1131,12 @@ TransporterRegistry::start_clients_thread() { g_eventLogger.info("Management server closed connection early. " "It is probably being shut down (or has problems). " - "We will retry the connection."); + "We will retry the connection. %d %s %s line: %d", + ndb_mgm_get_latest_error(m_mgm_handle), + ndb_mgm_get_latest_error_desc(m_mgm_handle), + ndb_mgm_get_latest_error_msg(m_mgm_handle), + ndb_mgm_get_latest_error_line(m_mgm_handle) + ); } } /** else @@ -1214,8 +1227,6 @@ TransporterRegistry::add_transporter_interface(NodeId remoteNodeId, bool TransporterRegistry::start_service(SocketServer& socket_server) { - struct ndb_mgm_reply mgm_reply; - DBUG_ENTER("TransporterRegistry::start_service"); if (m_transporter_interface.size() > 0 && !nodeIdSpecified) { diff --git a/storage/ndb/src/common/util/BaseString.cpp b/storage/ndb/src/common/util/BaseString.cpp index 6f20ae6a002..7e5adf0e9ef 100644 --- a/storage/ndb/src/common/util/BaseString.cpp +++ b/storage/ndb/src/common/util/BaseString.cpp @@ -16,19 +16,36 @@ /* -*- c-basic-offset: 4; -*- */ #include <ndb_global.h> #include <BaseString.hpp> -#include <basestring_vsnprintf.h> +#include "basestring_vsnprintf.h" BaseString::BaseString() { m_chr = new char[1]; + if (m_chr == NULL) + { + errno = ENOMEM; + m_len = 0; + return; + } m_chr[0] = 0; m_len = 0; } BaseString::BaseString(const char* s) { + if (s == NULL) + { + m_chr = NULL; + m_len = 0; + return; + } const size_t n = strlen(s); m_chr = new char[n + 1]; + if (m_chr == NULL) + { + errno = ENOMEM; + m_len = 0; + return; + } memcpy(m_chr, s, n + 1); m_len = n; } @@ -37,7 +54,20 @@ BaseString::BaseString(const BaseString& str) { const char* const s = str.m_chr; const size_t n = str.m_len; + if (s == NULL) + { + m_chr = NULL; + m_len = 0; + return; + } char* t = new char[n + 1]; + if (t == NULL) + { + errno = ENOMEM; + m_chr = NULL; + m_len = 0; + return; + } memcpy(t, s, n + 1); m_chr = t; m_len = n; @@ -51,9 +81,23 @@ BaseString::~BaseString() BaseString& BaseString::assign(const char* s) { - const size_t n = strlen(s); + if (s == NULL) + { + m_chr = NULL; + m_len = 0; + return *this; + } + size_t n =
strlen(s); char* t = new char[n + 1]; - memcpy(t, s, n + 1); + if (t) + { + memcpy(t, s, n + 1); + } + else + { + errno = ENOMEM; + n = 0; + } delete[] m_chr; m_chr = t; m_len = n; @@ -64,8 +108,16 @@ BaseString& BaseString::assign(const char* s, size_t n) { char* t = new char[n + 1]; - memcpy(t, s, n); - t[n] = 0; + if (t) + { + memcpy(t, s, n); + t[n] = 0; + } + else + { + errno = ENOMEM; + n = 0; + } delete[] m_chr; m_chr = t; m_len = n; @@ -83,10 +135,19 @@ BaseString::assign(const BaseString& str, size_t n) BaseString& BaseString::append(const char* s) { - const size_t n = strlen(s); + size_t n = strlen(s); char* t = new char[m_len + n + 1]; - memcpy(t, m_chr, m_len); - memcpy(t + m_len, s, n + 1); + if (t) + { + memcpy(t, m_chr, m_len); + memcpy(t + m_len, s, n + 1); + } + else + { + errno = ENOMEM; + m_len = 0; + n = 0; + } delete[] m_chr; m_chr = t; m_len += n; @@ -130,8 +191,14 @@ BaseString::assfmt(const char *fmt, ...) l = basestring_vsnprintf(buf, sizeof(buf), fmt, ap) + 1; va_end(ap); if(l > (int)m_len) { + char *t = new char[l]; + if (t == NULL) + { + errno = ENOMEM; + return *this; + } delete[] m_chr; - m_chr = new char[l]; + m_chr = t; } va_start(ap, fmt); basestring_vsnprintf(m_chr, l, fmt, ap); @@ -155,6 +222,11 @@ BaseString::appfmt(const char *fmt, ...) l = basestring_vsnprintf(buf, sizeof(buf), fmt, ap) + 1; va_end(ap); char *tmp = new char[l]; + if (tmp == NULL) + { + errno = ENOMEM; + return *this; + } va_start(ap, fmt); basestring_vsnprintf(tmp, l, fmt, ap); va_end(ap); @@ -242,9 +314,28 @@ BaseString::argify(const char *argv0, const char *src) { Vector<char *> vargv; if(argv0 != NULL) - vargv.push_back(strdup(argv0)); + { + char *t = strdup(argv0); + if (t == NULL) + { + errno = ENOMEM; + return NULL; + } + if (vargv.push_back(t)) + { + free(t); + return NULL; + } + } char *tmp = new char[strlen(src)+1]; + if (tmp == NULL) + { + for(size_t i = 0; i < vargv.size(); i++) + free(vargv[i]); + errno = ENOMEM; + return NULL; + } char *dst = tmp; const char *end = src + strlen(src); /* Copy characters from src to destination, while compacting them @@ -287,20 +378,48 @@ BaseString::argify(const char *argv0, const char *src) { /* Make sure the string is properly terminated */ *dst++ = '\0'; src++; - - vargv.push_back(strdup(begin)); + + { + char *t = strdup(begin); + if (t == NULL) + { + delete[] tmp; + for(size_t i = 0; i < vargv.size(); i++) + free(vargv[i]); + errno = ENOMEM; + return NULL; + } + if (vargv.push_back(t)) + { + free(t); + delete[] tmp; + for(size_t i = 0; i < vargv.size(); i++) + free(vargv[i]); + return NULL; + } + } } end: delete[] tmp; - vargv.push_back(NULL); + if (vargv.push_back(NULL)) + { + for(size_t i = 0; i < vargv.size(); i++) + free(vargv[i]); + return NULL; + } /* Convert the C++ Vector into a C-vector of strings, suitable for * calling execv(). 
*/ char **argv = (char **)malloc(sizeof(*argv) * (vargv.size())); if(argv == NULL) + { + for(size_t i = 0; i < vargv.size(); i++) + free(vargv[i]); + errno = ENOMEM; return NULL; + } for(size_t i = 0; i < vargv.size(); i++){ argv[i] = vargv[i]; diff --git a/storage/ndb/src/common/util/Bitmask.cpp b/storage/ndb/src/common/util/Bitmask.cpp index cdcc7862e25..edfe2363039 100644 --- a/storage/ndb/src/common/util/Bitmask.cpp +++ b/storage/ndb/src/common/util/Bitmask.cpp @@ -16,21 +16,6 @@ #include <Bitmask.hpp> #include <NdbOut.hpp> -static -void print(const Uint32 src[], Uint32 len, Uint32 pos = 0) -{ - printf("b'"); - for(unsigned i = 0; i<len; i++) - { - if(BitmaskImpl::get((pos + len + 31) >> 5, src, i+pos)) - printf("1"); - else - printf("0"); - if((i & 31) == 31) - printf(" "); - } -} - void BitmaskImpl::getFieldImpl(const Uint32 src[], unsigned shiftL, unsigned len, Uint32 dst[]) @@ -93,6 +78,22 @@ BitmaskImpl::setFieldImpl(Uint32 dst[], } #ifdef __TEST_BITMASK__ +static +void print(const Uint32 src[], Uint32 len, Uint32 pos = 0) +{ + printf("b'"); + for(unsigned i = 0; i<len; i++) + { + if(BitmaskImpl::get((pos + len + 31) >> 5, src, i+pos)) + printf("1"); + else + printf("0"); + if((i & 31) == 31) + printf(" "); + } +} + + #define DEBUG 0 #include <Vector.hpp> static void do_test(int bitmask_size); diff --git a/storage/ndb/src/common/util/ConfigValues.cpp b/storage/ndb/src/common/util/ConfigValues.cpp index cf6dcf904a6..9309fe3fbd6 100644 --- a/storage/ndb/src/common/util/ConfigValues.cpp +++ b/storage/ndb/src/common/util/ConfigValues.cpp @@ -18,8 +18,6 @@ #include <NdbOut.hpp> #include <NdbTCP.h> -static Uint32 hash(Uint32 key, Uint32 size); -static Uint32 nextHash(Uint32 key, Uint32 size, Uint32 pos, Uint32 count); static bool findKey(const Uint32 * vals, Uint32 sz, Uint32 key, Uint32 * pos); /** @@ -90,18 +88,18 @@ bool ConfigValues::getByPos(Uint32 pos, Entry * result) const { assert(pos < (2 * m_size)); Uint32 keypart = m_values[pos]; - Uint32 val = m_values[pos+1]; + Uint32 val2 = m_values[pos+1]; switch(::getTypeOf(keypart)){ case IntType: case SectionType: - result->m_int = val; + result->m_int = val2; break; case StringType: - result->m_string = * getString(val); + result->m_string = * getString(val2); break; case Int64Type: - result->m_int64 = * get64(val); + result->m_int64 = * get64(val2); break; case InvalidType: default: diff --git a/storage/ndb/src/common/util/File.cpp b/storage/ndb/src/common/util/File.cpp index 0482e2979eb..53e129e56a6 100644 --- a/storage/ndb/src/common/util/File.cpp +++ b/storage/ndb/src/common/util/File.cpp @@ -162,9 +162,9 @@ File_class::readChar(char* buf) } int -File_class::write(const void* buf, size_t size, size_t nitems) +File_class::write(const void* buf, size_t size_arg, size_t nitems) { - return ::fwrite(buf, size, nitems, m_file); + return ::fwrite(buf, size_arg, nitems, m_file); } int diff --git a/storage/ndb/src/common/util/InputStream.cpp b/storage/ndb/src/common/util/InputStream.cpp index 74c31cd7583..2337344d91a 100644 --- a/storage/ndb/src/common/util/InputStream.cpp +++ b/storage/ndb/src/common/util/InputStream.cpp @@ -34,14 +34,18 @@ FileInputStream::gets(char * buf, int bufLen){ } SocketInputStream::SocketInputStream(NDB_SOCKET_TYPE socket, - unsigned readTimeout) + unsigned read_timeout_ms) : m_socket(socket) { m_startover= true; - m_timeout = readTimeout; + m_timeout_remain= m_timeout_ms = read_timeout_ms; + + m_timedout= false; } char* SocketInputStream::gets(char * buf, int bufLen) { + if(timedout()) + return 0; assert(bufLen 
>= 2); int offset= 0; if(m_startover) @@ -52,10 +56,15 @@ SocketInputStream::gets(char * buf, int bufLen) { else offset= strlen(buf); - int res = readln_socket(m_socket, m_timeout, buf+offset, bufLen-offset, m_mutex); + int time= 0; + int res = readln_socket(m_socket, m_timeout_remain, &time, + buf+offset, bufLen-offset, m_mutex); - if(res == 0) + if(res >= 0) + m_timeout_remain-=time; + if(res == 0 || m_timeout_remain<=0) { + m_timedout= true; buf[0]=0; return buf; } @@ -63,7 +72,9 @@ SocketInputStream::gets(char * buf, int bufLen) { m_startover= true; if(res == -1) + { return 0; + } return buf; } diff --git a/storage/ndb/src/common/util/Makefile.am b/storage/ndb/src/common/util/Makefile.am index d331cce7e5c..5379a425c49 100644 --- a/storage/ndb/src/common/util/Makefile.am +++ b/storage/ndb/src/common/util/Makefile.am @@ -24,7 +24,8 @@ libgeneral_la_SOURCES = \ uucode.c random.c version.c \ strdup.c \ ConfigValues.cpp ndb_init.c basestring_vsnprintf.c \ - Bitmask.cpp + Bitmask.cpp \ + ndb_rand.c EXTRA_PROGRAMS = testBitmask testBitmask_SOURCES = testBitmask.cpp diff --git a/storage/ndb/src/common/util/NdbSqlUtil.cpp b/storage/ndb/src/common/util/NdbSqlUtil.cpp index 1234e4ece6b..0f62d66c149 100644 --- a/storage/ndb/src/common/util/NdbSqlUtil.cpp +++ b/storage/ndb/src/common/util/NdbSqlUtil.cpp @@ -681,8 +681,6 @@ int NdbSqlUtil::cmpBit(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full) { Uint32 n = (n1 < n2) ? n1 : n2; - char* c1 = (char*)p1; - char* c2 = (char*)p2; int ret = memcmp(p1, p2, n); return ret; } diff --git a/storage/ndb/src/common/util/OutputStream.cpp b/storage/ndb/src/common/util/OutputStream.cpp index 322b270d1cf..0943e47e33f 100644 --- a/storage/ndb/src/common/util/OutputStream.cpp +++ b/storage/ndb/src/common/util/OutputStream.cpp @@ -42,24 +42,53 @@ FileOutputStream::println(const char * fmt, ...){ } SocketOutputStream::SocketOutputStream(NDB_SOCKET_TYPE socket, - unsigned timeout){ + unsigned write_timeout_ms){ m_socket = socket; - m_timeout = timeout; + m_timeout_remain= m_timeout_ms = write_timeout_ms; + m_timedout= false; } int SocketOutputStream::print(const char * fmt, ...){ va_list ap; + + if(timedout()) + return -1; + + int time= 0; va_start(ap, fmt); - const int ret = vprint_socket(m_socket, m_timeout, fmt, ap); + int ret = vprint_socket(m_socket, m_timeout_ms, &time, fmt, ap); va_end(ap); + + if(ret >= 0) + m_timeout_remain-=time; + if(errno==ETIMEDOUT || m_timeout_remain<=0) + { + m_timedout= true; + ret= -1; + } + return ret; } int SocketOutputStream::println(const char * fmt, ...){ va_list ap; + + if(timedout()) + return -1; + + int time= 0; va_start(ap, fmt); - const int ret = vprintln_socket(m_socket, m_timeout, fmt, ap); + int ret = vprintln_socket(m_socket, m_timeout_ms, &time, fmt, ap); va_end(ap); + + if(ret >= 0) + m_timeout_remain-=time; + if (errno==ETIMEDOUT || m_timeout_remain<=0) + { + m_timedout= true; + ret= -1; + } + return ret; } diff --git a/storage/ndb/src/common/util/Properties.cpp b/storage/ndb/src/common/util/Properties.cpp index 8d5c56affd3..11a1d8690ae 100644 --- a/storage/ndb/src/common/util/Properties.cpp +++ b/storage/ndb/src/common/util/Properties.cpp @@ -627,11 +627,11 @@ PropertiesImpl::getPropsPut(const char * name, if(nvp == 0){ Properties * tmpP = new Properties(); PropertyImpl * tmpPI = new PropertyImpl(tmp2, tmpP); - PropertyImpl * nvp = put(tmpPI); + PropertyImpl * nvp2 = put(tmpPI); delete tmpP; free(tmp2); - return ((Properties*)nvp->value)->impl->getPropsPut(tmp+1, impl); + return 
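Both socket stream classes now carry a running time budget: each call measures how long its wait actually blocked, subtracts that from m_timeout_remain, and latches m_timedout so every later call fails fast without touching the socket. The bookkeeping in isolation, as a sketch independent of the NDB classes (all names illustrative):

/* A per-stream time budget: every blocking call charges its measured wait
   against the remaining budget; once spent, the stream stays timed out. */
class TimeBudget {
  int  m_remain_ms;
  bool m_timedout;
public:
  explicit TimeBudget(int total_ms) : m_remain_ms(total_ms), m_timedout(false) {}
  bool timedout() const { return m_timedout; }
  /* Subtract the time one wait took; latch the timed-out state exactly
     like the m_timeout_remain <= 0 checks above. */
  bool charge(int spent_ms)
  {
    m_remain_ms -= spent_ms;
    if (m_remain_ms <= 0)
      m_timedout = true;
    return !m_timedout;
  }
};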
((Properties*)nvp2->value)->impl->getPropsPut(tmp+1, impl); } free(tmp2); if(nvp->valueType != PropertiesType_Properties){ diff --git a/storage/ndb/src/common/util/SocketClient.cpp b/storage/ndb/src/common/util/SocketClient.cpp index c2825901929..3d1fd07d581 100644 --- a/storage/ndb/src/common/util/SocketClient.cpp +++ b/storage/ndb/src/common/util/SocketClient.cpp @@ -88,7 +88,7 @@ SocketClient::bind(const char* bindaddress, unsigned short localport) int ret = errno; NDB_CLOSE_SOCKET(m_sockfd); m_sockfd= NDB_INVALID_SOCKET; - return errno; + return ret; } if (::bind(m_sockfd, (struct sockaddr*)&local, sizeof(local)) == -1) diff --git a/storage/ndb/src/common/util/ndb_rand.c b/storage/ndb/src/common/util/ndb_rand.c new file mode 100644 index 00000000000..4fcc483cd49 --- /dev/null +++ b/storage/ndb/src/common/util/ndb_rand.c @@ -0,0 +1,40 @@ +/* Copyright (C) 2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#include <ndb_rand.h> + +static unsigned long next= 1; + +/** + * ndb_rand + * + * A constant-time, cheap pseudo-random number generator. + * + * NDB_RAND_MAX is assumed to be 32767. + * + * This is the POSIX example for "generating the same sequence on + * different machines", although that is not one of our requirements.
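For reference, a minimal sketch of how the two entry points defined just below might be used to get a reproducible sequence, e.g. for deterministic test data (assumes ndb_rand.h declares exactly the ndb_rand() and ndb_srand() shown; illustrative only):

#include <stdio.h>
#include <ndb_rand.h>   /* assumed to declare ndb_rand() and ndb_srand() */

int main(void)
{
  ndb_srand(42);                 /* fixed seed => reproducible stream */
  for (int i = 0; i < 5; i++)
    printf("%d\n", ndb_rand());  /* each value lies in [0, 32767] */
  return 0;
}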
+ */ +int ndb_rand(void) +{ + next= next * 1103515245 + 12345; + return((unsigned)(next/65536) % 32768); +} + +void ndb_srand(unsigned seed) +{ + next= seed; +} + diff --git a/storage/ndb/src/common/util/random.c b/storage/ndb/src/common/util/random.c index 3d4a48e7ef0..20ef537d89a 100644 --- a/storage/ndb/src/common/util/random.c +++ b/storage/ndb/src/common/util/random.c @@ -197,7 +197,7 @@ int initSequence(RandomSequence *seq, SequenceValues *inputValues) unsigned int i; unsigned int j; unsigned int totalLength; - unsigned int index; + unsigned int idx; if( !seq || !inputValues ) return(-1); @@ -219,12 +219,12 @@ int initSequence(RandomSequence *seq, SequenceValues *inputValues) /*----------------------*/ /* set the array values */ /*----------------------*/ - index = 0; + idx = 0; for(i = 0; inputValues[i].length != 0; i++) { for(j = 0; j < inputValues[i].length; j++ ) { - seq->values[index] = inputValues[i].value; - index++; + seq->values[idx] = inputValues[i].value; + idx++; } } diff --git a/storage/ndb/src/common/util/socket_io.cpp b/storage/ndb/src/common/util/socket_io.cpp index bbb76314032..dfdcd19412f 100644 --- a/storage/ndb/src/common/util/socket_io.cpp +++ b/storage/ndb/src/common/util/socket_io.cpp @@ -18,6 +18,7 @@ #include <NdbTCP.h> #include <socket_io.h> #include <NdbOut.hpp> +#include <NdbTick.h> extern "C" int @@ -47,7 +48,7 @@ read_socket(NDB_SOCKET_TYPE socket, int timeout_millis, extern "C" int -readln_socket(NDB_SOCKET_TYPE socket, int timeout_millis, +readln_socket(NDB_SOCKET_TYPE socket, int timeout_millis, int *time, char * buf, int buflen, NdbMutex *mutex){ if(buflen <= 1) return 0; @@ -62,7 +63,10 @@ readln_socket(NDB_SOCKET_TYPE socket, int timeout_millis, if(mutex) NdbMutex_Unlock(mutex); + Uint64 tick= NdbTick_CurrentMillisecond(); const int selectRes = select(socket + 1, &readset, 0, 0, &timeout); + + *time= NdbTick_CurrentMillisecond() - tick; if(mutex) NdbMutex_Lock(mutex); @@ -126,9 +130,13 @@ readln_socket(NDB_SOCKET_TYPE socket, int timeout_millis, FD_ZERO(&readset); FD_SET(socket, &readset); - timeout.tv_sec = (timeout_millis / 1000); - timeout.tv_usec = (timeout_millis % 1000) * 1000; + timeout.tv_sec = ((timeout_millis - *time) / 1000); + timeout.tv_usec = ((timeout_millis - *time) % 1000) * 1000; + + tick= NdbTick_CurrentMillisecond(); const int selectRes = select(socket + 1, &readset, 0, 0, &timeout); + *time= NdbTick_CurrentMillisecond() - tick; + if(selectRes != 1){ return -1; } @@ -139,7 +147,7 @@ readln_socket(NDB_SOCKET_TYPE socket, int timeout_millis, extern "C" int -write_socket(NDB_SOCKET_TYPE socket, int timeout_millis, +write_socket(NDB_SOCKET_TYPE socket, int timeout_millis, int *time, const char buf[], int len){ fd_set writeset; FD_ZERO(&writeset); @@ -148,7 +156,11 @@ write_socket(NDB_SOCKET_TYPE socket, int timeout_millis, timeout.tv_sec = (timeout_millis / 1000); timeout.tv_usec = (timeout_millis % 1000) * 1000; + + Uint64 tick= NdbTick_CurrentMillisecond(); const int selectRes = select(socket + 1, 0, &writeset, 0, &timeout); + *time= NdbTick_CurrentMillisecond() - tick; + if(selectRes != 1){ return -1; } @@ -167,10 +179,14 @@ write_socket(NDB_SOCKET_TYPE socket, int timeout_millis, FD_ZERO(&writeset); FD_SET(socket, &writeset); - timeout.tv_sec = 1; - timeout.tv_usec = 0; - const int selectRes = select(socket + 1, 0, &writeset, 0, &timeout); - if(selectRes != 1){ + timeout.tv_sec = ((timeout_millis - *time) / 1000); + timeout.tv_usec = ((timeout_millis - *time) % 1000) * 1000; + + Uint64 tick= NdbTick_CurrentMillisecond(); + const 
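The one-word SocketClient::bind() fix a little above (return ret instead of return errno) exists because NDB_CLOSE_SOCKET() may itself fail and overwrite errno before the function returns. The same save-errno-first pattern in plain POSIX terms (a sketch; the function and its check are illustrative):

#include <cerrno>
#include <unistd.h>
#include <sys/socket.h>

/* Save errno *before* any cleanup call that can clobber it, then report
   the saved value -- otherwise the caller may see close()'s errno instead
   of the error that actually occurred. */
static int enable_reuseaddr(int fd)
{
  const int on = 1;
  if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on)) == -1)
  {
    int saved = errno;   /* close() below may overwrite errno */
    close(fd);
    return saved;
  }
  return 0;
}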
int selectRes2 = select(socket + 1, 0, &writeset, 0, &timeout); + *time= NdbTick_CurrentMillisecond() - tick; + + if(selectRes2 != 1){ return -1; } } @@ -180,11 +196,11 @@ write_socket(NDB_SOCKET_TYPE socket, int timeout_millis, extern "C" int -print_socket(NDB_SOCKET_TYPE socket, int timeout_millis, +print_socket(NDB_SOCKET_TYPE socket, int timeout_millis, int *time, const char * fmt, ...){ va_list ap; va_start(ap, fmt); - int ret = vprint_socket(socket, timeout_millis, fmt, ap); + int ret = vprint_socket(socket, timeout_millis, time, fmt, ap); va_end(ap); return ret; @@ -192,18 +208,18 @@ print_socket(NDB_SOCKET_TYPE socket, int timeout_millis, extern "C" int -println_socket(NDB_SOCKET_TYPE socket, int timeout_millis, +println_socket(NDB_SOCKET_TYPE socket, int timeout_millis, int *time, const char * fmt, ...){ va_list ap; va_start(ap, fmt); - int ret = vprintln_socket(socket, timeout_millis, fmt, ap); + int ret = vprintln_socket(socket, timeout_millis, time, fmt, ap); va_end(ap); return ret; } extern "C" int -vprint_socket(NDB_SOCKET_TYPE socket, int timeout_millis, +vprint_socket(NDB_SOCKET_TYPE socket, int timeout_millis, int *time, const char * fmt, va_list ap){ char buf[1000]; char *buf2 = buf; @@ -221,7 +237,7 @@ vprint_socket(NDB_SOCKET_TYPE socket, int timeout_millis, } else return 0; - int ret = write_socket(socket, timeout_millis, buf2, size); + int ret = write_socket(socket, timeout_millis, time, buf2, size); if(buf2 != buf) free(buf2); return ret; @@ -229,7 +245,7 @@ vprint_socket(NDB_SOCKET_TYPE socket, int timeout_millis, extern "C" int -vprintln_socket(NDB_SOCKET_TYPE socket, int timeout_millis, +vprintln_socket(NDB_SOCKET_TYPE socket, int timeout_millis, int *time, const char * fmt, va_list ap){ char buf[1000]; char *buf2 = buf; @@ -249,7 +265,7 @@ vprintln_socket(NDB_SOCKET_TYPE socket, int timeout_millis, } buf2[size-1]='\n'; - int ret = write_socket(socket, timeout_millis, buf2, size); + int ret = write_socket(socket, timeout_millis, time, buf2, size); if(buf2 != buf) free(buf2); return ret; diff --git a/storage/ndb/src/common/util/version.c b/storage/ndb/src/common/util/version.c index b2ebb87c144..56a92489131 100644 --- a/storage/ndb/src/common/util/version.c +++ b/storage/ndb/src/common/util/version.c @@ -20,26 +20,33 @@ #include <NdbEnv.h> #include <NdbOut.hpp> -Uint32 getMajor(Uint32 version) { +Uint32 ndbGetMajor(Uint32 version) { return (version >> 16) & 0xFF; } -Uint32 getMinor(Uint32 version) { +Uint32 ndbGetMinor(Uint32 version) { return (version >> 8) & 0xFF; } -Uint32 getBuild(Uint32 version) { +Uint32 ndbGetBuild(Uint32 version) { return (version >> 0) & 0xFF; } -Uint32 makeVersion(Uint32 major, Uint32 minor, Uint32 build) { - return MAKE_VERSION(major, minor, build); +Uint32 ndbMakeVersion(Uint32 major, Uint32 minor, Uint32 build) { + return NDB_MAKE_VERSION(major, minor, build); } -char ndb_version_string_buf[NDB_VERSION_STRING_BUF_SZ]; -const char * getVersionString(Uint32 version, const char * status, - char *buf, unsigned sz) +const char * ndbGetOwnVersionString() +{ + static char ndb_version_string_buf[NDB_VERSION_STRING_BUF_SZ]; + return ndbGetVersionString(NDB_VERSION, NDB_VERSION_STATUS, + ndb_version_string_buf, + sizeof(ndb_version_string_buf)); +} + +const char * ndbGetVersionString(Uint32 version, const char * status, + char *buf, unsigned sz) { if (status && status[0] != 0) basestring_snprintf(buf, sz, @@ -91,7 +98,8 @@ void ndbSetOwnVersion() {} #ifndef TEST_VERSION struct NdbUpGradeCompatible ndbCompatibleTable_full[] = { - { 
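The socket_io.cpp changes above bracket every select() with NdbTick_CurrentMillisecond() so the caller learns the real wait via *time and can shrink the follow-up timeout to timeout_millis - *time. A plain-POSIX sketch of that measurement (now_ms() stands in for NdbTick_CurrentMillisecond()):

#include <sys/select.h>
#include <sys/time.h>

static long long now_ms(void)
{
  struct timeval tv;
  gettimeofday(&tv, 0);
  return (long long)tv.tv_sec * 1000 + tv.tv_usec / 1000;
}

/* Wait for readability, and tell the caller how long the wait really took
   so it can subtract that from its remaining timeout budget. */
static int wait_readable(int fd, int timeout_ms, int *spent_ms)
{
  fd_set readset;
  FD_ZERO(&readset);
  FD_SET(fd, &readset);

  struct timeval timeout;
  timeout.tv_sec  = timeout_ms / 1000;
  timeout.tv_usec = (timeout_ms % 1000) * 1000;

  long long tick = now_ms();
  int res = select(fd + 1, &readset, 0, 0, &timeout);
  *spent_ms = (int)(now_ms() - tick);
  return res;   /* 1 = readable, 0 = timed out, -1 = error */
}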
MAKE_VERSION(5,1,NDB_VERSION_BUILD), MAKE_VERSION(5,1,0), UG_Range}, + { MAKE_VERSION(5,1,NDB_VERSION_BUILD), MAKE_VERSION(5,1,18), UG_Range}, + { MAKE_VERSION(5,1,17), MAKE_VERSION(5,1,0), UG_Range}, { MAKE_VERSION(5,0,NDB_VERSION_BUILD), MAKE_VERSION(5,0,12), UG_Range}, { MAKE_VERSION(5,0,11), MAKE_VERSION(5,0,2), UG_Range}, { MAKE_VERSION(4,1,NDB_VERSION_BUILD), MAKE_VERSION(4,1,15), UG_Range }, diff --git a/storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.vcproj b/storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.vcproj index 56f9f3a8511..fb1e2fd601c 100644 --- a/storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.vcproj +++ b/storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.vcproj @@ -12,8 +12,8 @@ <Configurations> <Configuration Name="Release|Win32" - OutputDirectory=".\Release" - IntermediateDirectory=".\Release" + OutputDirectory=".\release_obj" + IntermediateDirectory=".\release_obj" ConfigurationType="1" UseOfMFC="0" ATLMinimizesCRunTimeLibraryUsage="FALSE" @@ -27,10 +27,10 @@ EnableFunctionLevelLinking="TRUE" UsePrecompiledHeader="3" PrecompiledHeaderThrough="stdafx.h" - PrecompiledHeaderFile=".\Release/CPC_GUI.pch" - AssemblerListingLocation=".\Release/" - ObjectFile=".\Release/" - ProgramDataBaseFileName=".\Release/" + PrecompiledHeaderFile=".\release_obj/CPC_GUI.pch" + AssemblerListingLocation=".\release_obj/" + ObjectFile=".\release_obj/" + ProgramDataBaseFileName=".\release_obj/" WarningLevel="3" SuppressStartupBanner="TRUE"/> <Tool @@ -39,10 +39,10 @@ Name="VCLinkerTool" AdditionalOptions="/MACHINE:I386" AdditionalDependencies="mfc42.lib" - OutputFile=".\Release/CPC_GUI.exe" + OutputFile=".\release_obj/CPC_GUI.exe" LinkIncremental="1" SuppressStartupBanner="TRUE" - ProgramDatabaseFile=".\Release/CPC_GUI.pdb" + ProgramDatabaseFile=".\release_obj/CPC_GUI.pdb" SubSystem="2"/> <Tool Name="VCMIDLTool" @@ -50,7 +50,7 @@ MkTypLibCompatible="TRUE" SuppressStartupBanner="TRUE" TargetEnvironment="1" - TypeLibraryName=".\Release/CPC_GUI.tlb"/> + TypeLibraryName=".\release_obj/CPC_GUI.tlb"/> <Tool Name="VCPostBuildEventTool"/> <Tool @@ -68,8 +68,8 @@ </Configuration> <Configuration Name="Debug|Win32" - OutputDirectory=".\Debug" - IntermediateDirectory=".\Debug" + OutputDirectory=".\debug_obj" + IntermediateDirectory=".\debug_obj" ConfigurationType="1" UseOfMFC="0" ATLMinimizesCRunTimeLibraryUsage="FALSE" @@ -82,10 +82,10 @@ RuntimeLibrary="5" UsePrecompiledHeader="3" PrecompiledHeaderThrough="stdafx.h" - PrecompiledHeaderFile=".\Debug/CPC_GUI.pch" - AssemblerListingLocation=".\Debug/" - ObjectFile=".\Debug/" - ProgramDataBaseFileName=".\Debug/" + PrecompiledHeaderFile=".\debug_obj/CPC_GUI.pch" + AssemblerListingLocation=".\debug_obj/" + ObjectFile=".\debug_obj/" + ProgramDataBaseFileName=".\debug_obj/" BrowseInformation="1" WarningLevel="3" SuppressStartupBanner="TRUE" @@ -96,11 +96,11 @@ Name="VCLinkerTool" AdditionalOptions="/MACHINE:I386" AdditionalDependencies="comctl32.lib mfc70d.lib" - OutputFile=".\Debug/CPC_GUI.exe" - LinkIncremental="2" + OutputFile=".\debug_obj/CPC_GUI.exe" + LinkIncremental="1" SuppressStartupBanner="TRUE" GenerateDebugInformation="TRUE" - ProgramDatabaseFile=".\Debug/CPC_GUI.pdb" + ProgramDatabaseFile=".\debug_obj/CPC_GUI.pdb" SubSystem="2"/> <Tool Name="VCMIDLTool" @@ -108,7 +108,7 @@ MkTypLibCompatible="TRUE" SuppressStartupBanner="TRUE" TargetEnvironment="1" - TypeLibraryName=".\Debug/CPC_GUI.tlb"/> + TypeLibraryName=".\debug_obj/CPC_GUI.tlb"/> <Tool Name="VCPostBuildEventTool"/> <Tool diff --git a/storage/ndb/src/cw/cpcd/APIService.cpp b/storage/ndb/src/cw/cpcd/APIService.cpp index 
ca4ab733842..f60abc08817 100644 --- a/storage/ndb/src/cw/cpcd/APIService.cpp +++ b/storage/ndb/src/cw/cpcd/APIService.cpp @@ -389,7 +389,6 @@ CPCDAPISession::listProcesses(Parser_t::Context & /* unused */, void CPCDAPISession::showVersion(Parser_t::Context & /* unused */, const class Properties & args){ - Uint32 id; CPCD::RequestStatus rs; m_output->println("show version"); diff --git a/storage/ndb/src/cw/cpcd/CPCD.hpp b/storage/ndb/src/cw/cpcd/CPCD.hpp index 2cada43b609..4d48bba096f 100644 --- a/storage/ndb/src/cw/cpcd/CPCD.hpp +++ b/storage/ndb/src/cw/cpcd/CPCD.hpp @@ -62,6 +62,7 @@ struct CPCEvent { struct EventSubscriber { virtual void report(const CPCEvent &) = 0; + EventSubscriber() {} virtual ~EventSubscriber() {} }; diff --git a/storage/ndb/src/cw/cpcd/Makefile.am b/storage/ndb/src/cw/cpcd/Makefile.am index dfd2e8c270b..efc828e21a9 100644 --- a/storage/ndb/src/cw/cpcd/Makefile.am +++ b/storage/ndb/src/cw/cpcd/Makefile.am @@ -26,7 +26,7 @@ LDADD_LOC = \ include $(top_srcdir)/storage/ndb/config/common.mk.am include $(top_srcdir)/storage/ndb/config/type_util.mk.am -ndb_cpcd_LDFLAGS = @ndb_bin_am_ldflags@ +ndb_cpcd_LDFLAGS = -static @ndb_bin_am_ldflags@ # Don't update the files from bitkeeper %::SCCS/s.% diff --git a/storage/ndb/src/cw/cpcd/main.cpp b/storage/ndb/src/cw/cpcd/main.cpp index 7021b4bc68d..d5c31d610cb 100644 --- a/storage/ndb/src/cw/cpcd/main.cpp +++ b/storage/ndb/src/cw/cpcd/main.cpp @@ -39,22 +39,22 @@ static const char *user = 0; static struct my_option my_long_options[] = { { "work-dir", 'w', "Work directory", - (gptr*) &work_dir, (gptr*) &work_dir, 0, + (uchar**) &work_dir, (uchar**) &work_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { "port", 'p', "TCP port to listen on", - (gptr*) &port, (gptr*) &port, 0, + (uchar**) &port, (uchar**) &port, 0, GET_INT, REQUIRED_ARG, CPCD_DEFAULT_TCP_PORT, 0, 0, 0, 0, 0 }, { "syslog", 'S', "Log events to syslog", - (gptr*) &use_syslog, (gptr*) &use_syslog, 0, + (uchar**) &use_syslog, (uchar**) &use_syslog, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "logfile", 'L', "File to log events to", - (gptr*) &logfile, (gptr*) &logfile, 0, + (uchar**) &logfile, (uchar**) &logfile, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { "debug", 'D', "Enable debug mode", - (gptr*) &debug, (gptr*) &debug, 0, + (uchar**) &debug, (uchar**) &debug, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "user", 'u', "Run as user", - (gptr*) &user, (gptr*) &user, 0, + (uchar**) &user, (uchar**) &user, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} }; @@ -74,8 +74,6 @@ extern "C" static void sig_child(int signo, siginfo_t*, void*); const char *progname = "ndb_cpcd"; int main(int argc, char** argv){ - int save_argc= argc; - char** save_argv= argv; const char *load_default_groups[]= { "ndb_cpcd",0 }; MY_INIT(argv[0]); diff --git a/storage/ndb/src/kernel/blocks/ERROR_codes.txt b/storage/ndb/src/kernel/blocks/ERROR_codes.txt index b58eeb730f3..5317d0e5c86 100644 --- a/storage/ndb/src/kernel/blocks/ERROR_codes.txt +++ b/storage/ndb/src/kernel/blocks/ERROR_codes.txt @@ -4,7 +4,7 @@ Next NDBFS 2000 Next DBACC 3002 Next DBTUP 4029 Next DBLQH 5045 -Next DBDICT 6007 +Next DBDICT 6008 Next DBDIH 7193 Next DBTC 8054 Next CMVMI 9000 @@ -75,6 +75,8 @@ Delay GCP_SAVEREQ by 10 secs 7180: Crash master during master-take-over in execMASTER_LCPCONF +7183: Crash when receiving COPY_GCIREQ + 7184: Crash before starting next GCP after a node failure 7185: Dont reply to COPY_GCI_REQ where reason == GCP @@ -307,6 +309,10 @@ ABORT OF 
TCKEYREQ 8038 : Simulate API disconnect just after SCAN_TAB_REQ +8052 : Simulate failure of TransactionBufferMemory allocation for OI lookup + +8051 : Simulate failure of allocation for saveINDXKEYINFO + CMVMI ----- @@ -510,6 +516,7 @@ Dbdict: 6003 Crash in participant @ CreateTabReq::Prepare 6004 Crash in participant @ CreateTabReq::Commit 6005 Crash in participant @ CreateTabReq::CreateDrop +6007 Fail on readTableFile for READ_TAB_FILE1 (28770) Dbtup: 4014 - handleInsert - Out of undo buffer diff --git a/storage/ndb/src/kernel/blocks/backup/Backup.cpp b/storage/ndb/src/kernel/blocks/backup/Backup.cpp index 6ad81df20be..64e2c41aa69 100644 --- a/storage/ndb/src/kernel/blocks/backup/Backup.cpp +++ b/storage/ndb/src/kernel/blocks/backup/Backup.cpp @@ -1523,7 +1523,6 @@ Backup::execCREATE_TRIG_CONF(Signal* signal) const Uint32 ptrI = conf->getConnectionPtr(); const Uint32 tableId = conf->getTableId(); const TriggerEvent::Value type = conf->getTriggerEvent(); - const Uint32 triggerId = conf->getTriggerId(); BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, ptrI); @@ -2187,7 +2186,6 @@ Backup::execDROP_TRIG_CONF(Signal* signal) DropTrigConf* conf = (DropTrigConf*)signal->getDataPtr(); const Uint32 ptrI = conf->getConnectionPtr(); - const Uint32 triggerId= conf->getTriggerId(); BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, ptrI); @@ -2440,6 +2438,18 @@ Backup::defineBackupRef(Signal* signal, BackupRecordPtr ptr, Uint32 errCode) if(ptr.p->is_lcp()) { jam(); + if (ptr.p->ctlFilePtr == RNIL) { + ptr.p->m_gsn = GSN_DEFINE_BACKUP_REF; + ndbrequire(ptr.p->errorCode != 0); + DefineBackupRef* ref = (DefineBackupRef*)signal->getDataPtrSend(); + ref->backupId = ptr.p->backupId; + ref->backupPtr = ptr.i; + ref->errorCode = ptr.p->errorCode; + ref->nodeId = getOwnNodeId(); + sendSignal(ptr.p->masterRef, GSN_DEFINE_BACKUP_REF, signal, + DefineBackupRef::SignalLength, JBB); + return; + } BackupFilePtr filePtr LINT_SET_PTR; ptr.p->files.getPtr(filePtr, ptr.p->ctlFilePtr); @@ -4714,7 +4724,6 @@ Backup::execABORT_BACKUP_ORD(Signal* signal) } ndbrequire(ok); - Uint32 ref= ptr.p->masterRef; ptr.p->masterRef = reference(); ptr.p->nodes.clear(); ptr.p->nodes.set(getOwnNodeId()); diff --git a/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp b/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp index f324f62a041..3406176d7a8 100644 --- a/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp +++ b/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp @@ -78,11 +78,7 @@ Cmvmi::Cmvmi(Block_context& ctx) : addRecSignal(GSN_OPEN_COMREQ, &Cmvmi::execOPEN_COMREQ); addRecSignal(GSN_TEST_ORD, &Cmvmi::execTEST_ORD); - addRecSignal(GSN_STATISTICS_REQ, &Cmvmi::execSTATISTICS_REQ); addRecSignal(GSN_TAMPER_ORD, &Cmvmi::execTAMPER_ORD); - addRecSignal(GSN_SET_VAR_REQ, &Cmvmi::execSET_VAR_REQ); - addRecSignal(GSN_SET_VAR_CONF, &Cmvmi::execSET_VAR_CONF); - addRecSignal(GSN_SET_VAR_REF, &Cmvmi::execSET_VAR_REF); addRecSignal(GSN_STOP_ORD, &Cmvmi::execSTOP_ORD); addRecSignal(GSN_START_ORD, &Cmvmi::execSTART_ORD); addRecSignal(GSN_EVENT_SUBSCRIBE_REQ, @@ -744,24 +740,6 @@ Cmvmi::execTEST_ORD(Signal * signal){ #endif } -void Cmvmi::execSTATISTICS_REQ(Signal* signal) -{ - // TODO Note ! This is only a test implementation... 
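The new entries in ERROR_codes.txt register fault-injection points; inside the kernel blocks each one is guarded by ERROR_INSERTED(n), as the Dbdict 6007 hook further below shows. A simplified stand-in for the idiom (the real macro lives in the simulated-block framework, so the internals here are only a guess for illustration):

#ifdef ERROR_INSERT
static unsigned g_error_insert_value = 0;   /* set by the test harness */
#define ERROR_INSERTED(x) (g_error_insert_value == (x))
#else
#define ERROR_INSERTED(x) false
#endif

/* Example: force the "read of table file 1 failed" path (cf. code 6007). */
static int read_table_file1(void)
{
  if (ERROR_INSERTED(6007))
    return -1;   /* pretend the filesystem read failed */
  return 0;      /* normal read path */
}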
- - static int stat1 = 0; - jamEntry(); - - //ndbout << "data 1: " << signal->theData[1]; - - int x = signal->theData[0]; - stat1++; - signal->theData[0] = stat1; - sendSignal(x, GSN_STATISTICS_CONF, signal, 7, JBB); - -}//execSTATISTICS_REQ() - - - void Cmvmi::execSTOP_ORD(Signal* signal) { jamEntry(); @@ -880,7 +858,7 @@ void Cmvmi::execTAMPER_ORD(Signal* signal) // to be able to indicate if we really introduced an error. #ifdef ERROR_INSERT TamperOrd* const tamperOrd = (TamperOrd*)&signal->theData[0]; - + signal->theData[2] = 0; signal->theData[1] = tamperOrd->errorNo; signal->theData[0] = 5; sendSignal(DBDIH_REF, GSN_DIHNDBTAMPER, signal, 3,JBB); @@ -888,160 +866,6 @@ void Cmvmi::execTAMPER_ORD(Signal* signal) }//execTAMPER_ORD() - - -void Cmvmi::execSET_VAR_REQ(Signal* signal) -{ -#if 0 - - SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0]; - ConfigParamId var = setVarReq->variable(); - jamEntry(); - switch (var) { - - // NDBCNTR_REF - - // DBTC - case TransactionDeadlockDetectionTimeout: - case TransactionInactiveTime: - case NoOfConcurrentProcessesHandleTakeover: - sendSignal(DBTC_REF, GSN_SET_VAR_REQ, signal, 3, JBB); - break; - - // DBDIH - case TimeBetweenLocalCheckpoints: - case TimeBetweenGlobalCheckpoints: - sendSignal(DBDIH_REF, GSN_SET_VAR_REQ, signal, 3, JBB); - break; - - // DBLQH - case NoOfConcurrentCheckpointsDuringRestart: - case NoOfConcurrentCheckpointsAfterRestart: - sendSignal(DBLQH_REF, GSN_SET_VAR_REQ, signal, 3, JBB); - break; - - // DBACC - case NoOfDiskPagesToDiskDuringRestartACC: - case NoOfDiskPagesToDiskAfterRestartACC: - sendSignal(DBACC_REF, GSN_SET_VAR_REQ, signal, 3, JBB); - break; - - // DBTUP - case NoOfDiskPagesToDiskDuringRestartTUP: - case NoOfDiskPagesToDiskAfterRestartTUP: - sendSignal(DBTUP_REF, GSN_SET_VAR_REQ, signal, 3, JBB); - break; - - // DBDICT - - // NDBCNTR - case TimeToWaitAlive: - - // QMGR - case HeartbeatIntervalDbDb: // TODO possibly Ndbcnt too - case HeartbeatIntervalDbApi: - case ArbitTimeout: - sendSignal(QMGR_REF, GSN_SET_VAR_REQ, signal, 3, JBB); - break; - - // NDBFS - - // CMVMI - case MaxNoOfSavedMessages: - case LockPagesInMainMemory: - case TimeBetweenWatchDogCheck: - case StopOnError: - handleSET_VAR_REQ(signal); - break; - - - // Not possible to update (this could of course be handled by each block - // instead but I havn't investigated where they belong) - case Id: - case ExecuteOnComputer: - case ShmKey: - case MaxNoOfConcurrentOperations: - case MaxNoOfConcurrentTransactions: - case MemorySpaceIndexes: - case MemorySpaceTuples: - case MemoryDiskPages: - case NoOfFreeDiskClusters: - case NoOfDiskClusters: - case NoOfFragmentLogFiles: - case NoOfDiskClustersPerDiskFile: - case NoOfDiskFiles: - case MaxNoOfSavedEvents: - default: - - int mgmtSrvr = setVarReq->mgmtSrvrBlockRef(); - sendSignal(mgmtSrvr, GSN_SET_VAR_REF, signal, 0, JBB); - } // switch - -#endif -}//execSET_VAR_REQ() - - -void Cmvmi::execSET_VAR_CONF(Signal* signal) -{ - int mgmtSrvr = signal->theData[0]; - sendSignal(mgmtSrvr, GSN_SET_VAR_CONF, signal, 0, JBB); - -}//execSET_VAR_CONF() - - -void Cmvmi::execSET_VAR_REF(Signal* signal) -{ - int mgmtSrvr = signal->theData[0]; - sendSignal(mgmtSrvr, GSN_SET_VAR_REF, signal, 0, JBB); - -}//execSET_VAR_REF() - - -void Cmvmi::handleSET_VAR_REQ(Signal* signal) { -#if 0 - SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0]; - ConfigParamId var = setVarReq->variable(); - int val = setVarReq->value(); - - switch (var) { - case MaxNoOfSavedMessages: - m_ctx.m_config.maxNoOfErrorLogs(val); - 
sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB); - break; - - case LockPagesInMainMemory: - int result; - if (val == 0) { - result = NdbMem_MemUnlockAll(); - } - else { - result = NdbMem_MemLockAll(); - } - if (result == 0) { - sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB); - } - else { - sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB); - } - break; - - case TimeBetweenWatchDogCheck: - m_ctx.m_config.timeBetweenWatchDogCheck(val); - sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB); - break; - - case StopOnError: - m_ctx.m_config.stopOnError(val); - sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB); - break; - - default: - sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB); - return; - } // switch -#endif -} - #ifdef VM_TRACE class RefSignalTest { public: @@ -1386,7 +1210,7 @@ Cmvmi::execTESTSIG(Signal* signal){ fprintf(stdout, "\n"); for(i = 0; i<signal->header.m_noOfSections; i++){ - SegmentedSectionPtr ptr; + SegmentedSectionPtr ptr(0,0,0); ndbout_c("-- Section %d --", i); signal->getSection(ptr, i); ndbrequire(ptr.p != 0); @@ -1444,7 +1268,7 @@ Cmvmi::execTESTSIG(Signal* signal){ LinearSectionPtr ptr[3]; const Uint32 secs = signal->getNoOfSections(); for(i = 0; i<secs; i++){ - SegmentedSectionPtr sptr; + SegmentedSectionPtr sptr(0,0,0); signal->getSection(sptr, i); ptr[i].sz = sptr.sz; ptr[i].p = new Uint32[sptr.sz]; @@ -1493,7 +1317,7 @@ Cmvmi::execTESTSIG(Signal* signal){ LinearSectionPtr ptr[3]; const Uint32 secs = signal->getNoOfSections(); for(i = 0; i<secs; i++){ - SegmentedSectionPtr sptr; + SegmentedSectionPtr sptr(0,0,0); signal->getSection(sptr, i); ptr[i].sz = sptr.sz; ptr[i].p = new Uint32[sptr.sz]; @@ -1559,7 +1383,7 @@ Cmvmi::execTESTSIG(Signal* signal){ const Uint32 secs = signal->getNoOfSections(); memset(g_test, 0, sizeof(g_test)); for(i = 0; i<secs; i++){ - SegmentedSectionPtr sptr; + SegmentedSectionPtr sptr(0,0,0); signal->getSection(sptr, i); g_test[i].sz = sptr.sz; g_test[i].p = new Uint32[sptr.sz]; diff --git a/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.hpp b/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.hpp index 208f2511c6d..bc88f1a0c63 100644 --- a/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.hpp +++ b/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.hpp @@ -55,20 +55,14 @@ private: void execSIZEALT_ACK(Signal* signal); void execTEST_ORD(Signal* signal); - void execSTATISTICS_REQ(Signal* signal); void execSTOP_ORD(Signal* signal); void execSTART_ORD(Signal* signal); void execTAMPER_ORD(Signal* signal); - void execSET_VAR_REQ(Signal* signal); - void execSET_VAR_CONF(Signal* signal); - void execSET_VAR_REF(Signal* signal); void execDUMP_STATE_ORD(Signal* signal); void execEVENT_SUBSCRIBE_REQ(Signal *); void cancelSubscription(NodeId nodeId); - - void handleSET_VAR_REQ(Signal* signal); void execTESTSIG(Signal* signal); void execNODE_START_REP(Signal* signal); diff --git a/storage/ndb/src/kernel/blocks/dbacc/Dbacc.hpp b/storage/ndb/src/kernel/blocks/dbacc/Dbacc.hpp index e75e88d95fa..21114c6a155 100644 --- a/storage/ndb/src/kernel/blocks/dbacc/Dbacc.hpp +++ b/storage/ndb/src/kernel/blocks/dbacc/Dbacc.hpp @@ -537,6 +537,7 @@ struct Operationrec { ,OP_INITIAL = ~(Uint32)0 }; + Operationrec() {} bool is_same_trans(const Operationrec* op) const { return transId1 == op->transId1 && transId2 == op->transId2; @@ -660,7 +661,6 @@ private: void execNDB_STTOR(Signal* signal); void execDROP_TAB_REQ(Signal* signal); void execREAD_CONFIG_REQ(Signal* signal); - void execSET_VAR_REQ(Signal* signal); void execDUMP_STATE_ORD(Signal* signal); // Statement 
blocks diff --git a/storage/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp b/storage/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp index 87db12cea51..9ba164d264c 100644 --- a/storage/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp +++ b/storage/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp @@ -113,7 +113,6 @@ Dbacc::Dbacc(Block_context& ctx): addRecSignal(GSN_NDB_STTOR, &Dbacc::execNDB_STTOR); addRecSignal(GSN_DROP_TAB_REQ, &Dbacc::execDROP_TAB_REQ); addRecSignal(GSN_READ_CONFIG_REQ, &Dbacc::execREAD_CONFIG_REQ, true); - addRecSignal(GSN_SET_VAR_REQ, &Dbacc::execSET_VAR_REQ); initData(); diff --git a/storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp b/storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp index d34cfb159a4..63d22bd0a37 100644 --- a/storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp +++ b/storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp @@ -699,7 +699,6 @@ Dbacc::execDROP_TAB_REQ(Signal* signal){ void Dbacc::releaseRootFragResources(Signal* signal, Uint32 tableId) { - FragmentrecPtr rootPtr; TabrecPtr tabPtr; tabPtr.i = tableId; ptrCheckGuard(tabPtr, ctablesize, tabrec); @@ -2138,6 +2137,7 @@ Dbacc::placeReadInLockQueue(OperationrecPtr lockOwnerPtr) if (same && (lastbits & Operationrec::OP_ACC_LOCK_MODE)) { jam(); + opbits |= Operationrec::OP_LOCK_MODE; // Upgrade to X-lock goto checkop; } @@ -2268,7 +2268,6 @@ void Dbacc::execACCMINUPDATE(Signal* signal) Page8Ptr ulkPageidptr; Uint32 tulkLocalPtr; Uint32 tlocalkey1, tlocalkey2; - Uint32 TlogStart; jamEntry(); operationRecPtr.i = signal->theData[0]; @@ -8523,33 +8522,6 @@ Dbacc::execDUMP_STATE_ORD(Signal* signal) #endif }//Dbacc::execDUMP_STATE_ORD() -void Dbacc::execSET_VAR_REQ(Signal* signal) -{ -#if 0 - SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0]; - ConfigParamId var = setVarReq->variable(); - int val = setVarReq->value(); - - - switch (var) { - - case NoOfDiskPagesToDiskAfterRestartACC: - clblPagesPerTick = val; - sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB); - break; - - case NoOfDiskPagesToDiskDuringRestartACC: - // Valid only during start so value not set. 
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB); - break; - - default: - sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB); - } // switch -#endif - -}//execSET_VAR_REQ() - void Dbacc::execREAD_PSEUDO_REQ(Signal* signal){ jamEntry(); diff --git a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp index 4222a43264a..569958a6aa9 100644 --- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp +++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp @@ -18,6 +18,7 @@ #define DBDICT_C #include "Dbdict.hpp" +#include "diskpage.hpp" #include <ndb_limits.h> #include <NdbOut.hpp> @@ -85,6 +86,9 @@ #include <signaldata/CreateObj.hpp> #include <SLList.hpp> +#include <EventLogger.hpp> +extern EventLogger g_eventLogger; + #define ZNOT_FOUND 626 #define ZALREADYEXIST 630 @@ -187,7 +191,7 @@ struct { 0, 0, 0, 0, &Dbdict::drop_undofile_prepare_start, 0, 0, - 0, 0, + 0, &Dbdict::drop_undofile_commit_complete, 0, 0, 0 } }; @@ -291,7 +295,6 @@ Dbdict::execDUMP_STATE_ORD(Signal* signal) for(; ok; ok = c_obj_hash.next(iter)) { Rope name(c_rope_pool, iter.curr.p->m_name); - const Uint32 size = name.size(); char buf[1024]; name.copy(buf); ndbout_c("%s m_ref_count: %d", buf, iter.curr.p->m_ref_count); @@ -464,6 +467,7 @@ Dbdict::packTableIntoPages(SimpleProperties::Writer & w, w.add(DictTabInfo::FragmentCount, tablePtr.p->fragmentCount); w.add(DictTabInfo::MinRowsLow, tablePtr.p->minRowsLow); w.add(DictTabInfo::MinRowsHigh, tablePtr.p->minRowsHigh); + w.add(DictTabInfo::SingleUserMode, tablePtr.p->singleUserMode); if(signal) { @@ -798,6 +802,15 @@ void Dbdict::execFSREADCONF(Signal* signal) readSchemaConf(signal ,fsPtr); break; case FsConnectRecord::READ_TAB_FILE1: + if(ERROR_INSERTED(6007)){ + jam(); + FsRef * const fsRef = (FsRef *)&signal->theData[0]; + fsRef->userPointer = fsConf->userPointer; + fsRef->setErrorCode(fsRef->errorCode, NDBD_EXIT_AFS_UNKNOWN); + fsRef->osErrorCode = ~0; // Indicate local error + execFSREADREF(signal); + return; + }//Testing how DICT behaves if read of file 1 fails (Bug#28770) case FsConnectRecord::READ_TAB_FILE2: jam(); readTableConf(signal ,fsPtr); @@ -1098,7 +1111,6 @@ Dbdict::updateSchemaState(Signal* signal, Uint32 tableId, SchemaFile::TableEntry* te, Callback* callback, bool savetodisk){ jam(); - ndbrequire(tableId < c_tableRecordPool.getSize()); XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0]; SchemaFile::TableEntry * tableEntry = getTableEntry(xsf, tableId); @@ -1377,18 +1389,36 @@ void Dbdict::readSchemaConf(Signal* signal, for (Uint32 n = 0; n < xsf->noOfPages; n++) { SchemaFile * sf = &xsf->schemaPage[n]; - bool ok = - memcmp(sf->Magic, NDB_SF_MAGIC, sizeof(sf->Magic)) == 0 && - sf->FileSize != 0 && - sf->FileSize % NDB_SF_PAGE_SIZE == 0 && - sf->FileSize == sf0->FileSize && - sf->PageNumber == n && - computeChecksum((Uint32*)sf, NDB_SF_PAGE_SIZE_IN_WORDS) == 0; - ndbrequireErr(ok || !crashInd, NDBD_EXIT_SR_SCHEMAFILE); - if (!
ok) { - jam(); + bool ok = false; + const char *reason; + if (memcmp(sf->Magic, NDB_SF_MAGIC, sizeof(sf->Magic)) != 0) + { jam(); reason = "magic code"; } + else if (sf->FileSize == 0) + { jam(); reason = "file size == 0"; } + else if (sf->FileSize % NDB_SF_PAGE_SIZE != 0) + { jam(); reason = "invalid size multiple"; } + else if (sf->FileSize != sf0->FileSize) + { jam(); reason = "invalid size"; } + else if (sf->PageNumber != n) + { jam(); reason = "invalid page number"; } + else if (computeChecksum((Uint32*)sf, NDB_SF_PAGE_SIZE_IN_WORDS) != 0) + { jam(); reason = "invalid checksum"; } + else + ok = true; + + if (!ok) + { + char reason_msg[128]; + snprintf(reason_msg, sizeof(reason_msg), + "schema file corrupt, page %u (%s, " + "sz=%u sz0=%u pn=%u)", + n, reason, sf->FileSize, sf0->FileSize, sf->PageNumber); + if (crashInd) + progError(__LINE__, NDBD_EXIT_SR_SCHEMAFILE, reason_msg); ndbrequireErr(fsPtr.p->fsState == FsConnectRecord::READ_SCHEMA1, NDBD_EXIT_SR_SCHEMAFILE); + jam(); + infoEvent("primary %s, trying backup", reason_msg); readSchemaRef(signal, fsPtr); return; } @@ -1881,6 +1911,7 @@ void Dbdict::initialiseTableRecord(TableRecordPtr tablePtr) tablePtr.p->m_bits = 0; tablePtr.p->minRowsLow = 0; tablePtr.p->minRowsHigh = 0; + tablePtr.p->singleUserMode = 0; tablePtr.p->tableType = DictTabInfo::UserTable; tablePtr.p->primaryTableId = RNIL; // volatile elements @@ -2667,25 +2698,63 @@ checkSchemaStatus(Uint32 tableType, Uint32 pass) case DictTabInfo::IndexTrigger: return false; case DictTabInfo::LogfileGroup: - return pass == 0; + return pass == 0 || pass == 9 || pass == 10; case DictTabInfo::Tablespace: - return pass == 1; + return pass == 1 || pass == 8 || pass == 11; case DictTabInfo::Datafile: case DictTabInfo::Undofile: - return pass == 2; + return pass == 2 || pass == 7 || pass == 12; case DictTabInfo::SystemTable: case DictTabInfo::UserTable: - return pass == 3; + return /* pass == 3 || pass == 6 || */ pass == 13; case DictTabInfo::UniqueHashIndex: case DictTabInfo::HashIndex: case DictTabInfo::UniqueOrderedIndex: case DictTabInfo::OrderedIndex: - return pass == 4; + return /* pass == 4 || pass == 5 || */ pass == 14; } return false; } +static const Uint32 CREATE_OLD_PASS = 4; +static const Uint32 DROP_OLD_PASS = 9; +static const Uint32 CREATE_NEW_PASS = 14; +static const Uint32 LAST_PASS = 14; + +NdbOut& +operator<<(NdbOut& out, const SchemaFile::TableEntry entry) +{ + out << "["; + out << " state: " << entry.m_tableState; + out << " version: " << hex << entry.m_tableVersion << dec; + out << " type: " << entry.m_tableType; + out << " words: " << entry.m_info_words; + out << " gcp: " << entry.m_gcp; + out << " ]"; + return out; +} + +/** + * Pass 0 Create old LogfileGroup + * Pass 1 Create old Tablespace + * Pass 2 Create old Datafile/Undofile + * Pass 3 Create old Table // NOT DONE DUE TO DIH + * Pass 4 Create old Index // NOT DONE DUE TO DIH + + * Pass 5 Drop old Index // NOT DONE DUE TO DIH + * Pass 6 Drop old Table // NOT DONE DUE TO DIH + * Pass 7 Drop old Datafile/Undofile + * Pass 8 Drop old Tablespace + * Pass 9 Drop old Logfilegroup + + * Pass 10 Create new LogfileGroup + * Pass 11 Create new Tablespace + * Pass 12 Create new Datafile/Undofile + * Pass 13 Create new Table + * Pass 14 Create new Index + */ + void Dbdict::checkSchemaStatus(Signal* signal) { XSchemaFile * newxsf = &c_schemaFile[c_schemaRecord.schemaPage != 0]; @@ -2700,287 +2769,134 @@ void Dbdict::checkSchemaStatus(Signal* signal) Uint32 tableId = c_restartRecord.activeTable; SchemaFile::TableEntry 
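The readSchemaConf() rewrite above trades one opaque boolean conjunction for a check-by-check cascade, so the event log can name the first invariant that failed before retrying with the backup schema file. The shape of that pattern, reduced to a self-contained sketch (the struct layout, magic string, and page size are illustrative, not the real SchemaFile):

#include <cstring>

struct PageHeader {
  char     magic[8];
  unsigned file_size;
  unsigned page_number;
};

/* Validate one page and, on failure, name the first invariant that broke. */
static bool check_page(const PageHeader &h, unsigned expect_size,
                       unsigned n, const char **reason)
{
  const unsigned PAGE_SIZE = 4096;
  if (std::memcmp(h.magic, "SCHEMAv1", 8) != 0) { *reason = "magic code"; return false; }
  if (h.file_size == 0)                  { *reason = "file size == 0"; return false; }
  if (h.file_size % PAGE_SIZE != 0)      { *reason = "invalid size multiple"; return false; }
  if (h.file_size != expect_size)        { *reason = "invalid size"; return false; }
  if (h.page_number != n)                { *reason = "invalid page number"; return false; }
  *reason = 0;          /* checksum check omitted for brevity */
  return true;
}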
*newEntry = getTableEntry(newxsf, tableId); SchemaFile::TableEntry *oldEntry = getTableEntry(oldxsf, tableId); - SchemaFile::TableState schemaState = + SchemaFile::TableState newSchemaState = (SchemaFile::TableState)newEntry->m_tableState; SchemaFile::TableState oldSchemaState = (SchemaFile::TableState)oldEntry->m_tableState; if (c_restartRecord.activeTable >= c_tableRecordPool.getSize()) { jam(); - ndbrequire(schemaState == SchemaFile::INIT); + ndbrequire(newSchemaState == SchemaFile::INIT); ndbrequire(oldSchemaState == SchemaFile::INIT); continue; }//if - if(!::checkSchemaStatus(oldEntry->m_tableType, c_restartRecord.m_pass)) - continue; - - if(!::checkSchemaStatus(newEntry->m_tableType, c_restartRecord.m_pass)) - continue; +//#define PRINT_SCHEMA_RESTART +#ifdef PRINT_SCHEMA_RESTART + char buf[100]; + snprintf(buf, sizeof(buf), "checkSchemaStatus: pass: %d table: %d", + c_restartRecord.m_pass, tableId); +#endif - switch(schemaState){ - case SchemaFile::INIT:{ - jam(); - bool ok = false; - switch(oldSchemaState) { - case SchemaFile::INIT: - jam(); - case SchemaFile::DROP_TABLE_COMMITTED: - jam(); - ok = true; - jam(); - break; - - case SchemaFile::ADD_STARTED: - jam(); - case SchemaFile::TABLE_ADD_COMMITTED: - jam(); - case SchemaFile::DROP_TABLE_STARTED: - jam(); - case SchemaFile::ALTER_TABLE_COMMITTED: - jam(); - ok = true; - jam(); - newEntry->m_tableState = SchemaFile::INIT; - restartDropTab(signal, tableId); - return; + if (c_restartRecord.m_pass <= CREATE_OLD_PASS) + { + if (!::checkSchemaStatus(oldEntry->m_tableType, c_restartRecord.m_pass)) + continue; - case SchemaFile::TEMPORARY_TABLE_COMMITTED: - // Temporary table is never written to disk, so just set to INIT. - jam(); - ok = true; - newEntry->m_tableState = SchemaFile::INIT; - break; - }//switch - ndbrequire(ok); - break; - } - case SchemaFile::ADD_STARTED:{ - jam(); - bool ok = false; - switch(oldSchemaState) { - case SchemaFile::INIT: - jam(); - case SchemaFile::DROP_TABLE_COMMITTED: - jam(); - ok = true; - break; - case SchemaFile::ADD_STARTED: - jam(); - case SchemaFile::DROP_TABLE_STARTED: - jam(); - case SchemaFile::TABLE_ADD_COMMITTED: - jam(); - case SchemaFile::ALTER_TABLE_COMMITTED: + switch(oldSchemaState){ + case SchemaFile::INIT: jam(); + case SchemaFile::DROP_TABLE_COMMITTED: jam(); + case SchemaFile::ADD_STARTED: jam(); + case SchemaFile::DROP_TABLE_STARTED: jam(); + case SchemaFile::TEMPORARY_TABLE_COMMITTED: jam(); + continue; + case SchemaFile::TABLE_ADD_COMMITTED: jam(); + case SchemaFile::ALTER_TABLE_COMMITTED: jam(); jam(); - ok = true; - //------------------------------------------------------------------ - // Add Table was started but not completed. Will be dropped in all - // nodes. Update schema information (restore table version). 
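The pass table above encodes dependency order across the restart: objects from the old schema file are created bottom-up, dropped top-down, then the new schema's objects are created bottom-up again, with table and index create/drop in the middle passes left to DIH. A sketch of the predicate that drives this, mirroring the ::checkSchemaStatus() changes shown earlier (enum is an illustrative subset):

enum ObjType { LogfileGroup, Tablespace, Datafile, Table, Index };

/* True when 'type' is handled in restart pass 'pass': create-old in
   passes 0-2, drop-old in reverse order in passes 7-9, create-new in
   passes 10-14; table/index passes 3-6 are skipped because DIH owns them. */
static bool runs_in_pass(ObjType type, unsigned pass)
{
  switch (type) {
  case LogfileGroup: return pass == 0 || pass == 9 || pass == 10;
  case Tablespace:   return pass == 1 || pass == 8 || pass == 11;
  case Datafile:     return pass == 2 || pass == 7 || pass == 12;
  case Table:        return pass == 13;
  case Index:        return pass == 14;
  }
  return false;
}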
- //------------------------------------------------------------------ - newEntry->m_tableState = SchemaFile::INIT; - restartDropTab(signal, tableId); - return; - case SchemaFile::TEMPORARY_TABLE_COMMITTED: - jam(); - ok = true; - newEntry->m_tableState = SchemaFile::INIT; - break; +#ifdef PRINT_SCHEMA_RESTART + ndbout_c("%s -> restartCreateTab", buf); + ndbout << *newEntry << " " << *oldEntry << endl; +#endif + restartCreateTab(signal, tableId, oldEntry, oldEntry, true); + return; } - ndbrequire(ok); - break; } - case SchemaFile::TABLE_ADD_COMMITTED:{ - jam(); - bool ok = false; - switch(oldSchemaState) { - case SchemaFile::INIT: - jam(); - case SchemaFile::ADD_STARTED: - jam(); - case SchemaFile::DROP_TABLE_STARTED: - jam(); - case SchemaFile::DROP_TABLE_COMMITTED: - jam(); - ok = true; - //------------------------------------------------------------------ - // Table was added in the master node but not in our node. We can - // retrieve the table definition from the master. - //------------------------------------------------------------------ - restartCreateTab(signal, tableId, oldEntry, newEntry, false); - return; - break; - case SchemaFile::TABLE_ADD_COMMITTED: - jam(); - case SchemaFile::ALTER_TABLE_COMMITTED: - jam(); - ok = true; - //------------------------------------------------------------------ - // Table was added in both our node and the master node. We can - // retrieve the table definition from our own disk. - //------------------------------------------------------------------ - if(newEntry->m_tableVersion == oldEntry->m_tableVersion) - { - jam(); - ndbrequire(newEntry->m_gcp == oldEntry->m_gcp); - ndbrequire(newEntry->m_tableType == oldEntry->m_tableType); - Uint32 type= oldEntry->m_tableType; - // On NR get index from master because index state is not on file - const bool file = c_systemRestart || !DictTabInfo::isIndex(type); - newEntry->m_info_words= oldEntry->m_info_words; - restartCreateTab(signal, tableId, oldEntry, newEntry, file); - return; - } else { - //------------------------------------------------------------------ - // Must be a new version of the table if anything differs. Both table - // version and global checkpoint must be different. - // This should not happen for the master node. This can happen after - // drop table followed by add table or after change table. - // Not supported in this version. - //------------------------------------------------------------------ - ndbrequire(c_masterNodeId != getOwnNodeId()); - ndbrequire(newEntry->m_tableVersion != oldEntry->m_tableVersion); - jam(); - - restartCreateTab(signal, tableId, oldEntry, newEntry, false); - return; - }//if - case SchemaFile::TEMPORARY_TABLE_COMMITTED: - jam(); - ok = true; - // For NR, we must re-create the table. - // For SR, we do nothing as the table was never saved to disk. - if(!c_systemRestart) + if (c_restartRecord.m_pass <= DROP_OLD_PASS) + { + if (!::checkSchemaStatus(oldEntry->m_tableType, c_restartRecord.m_pass)) + continue; + + switch(oldSchemaState){ + case SchemaFile::INIT: jam(); + case SchemaFile::DROP_TABLE_COMMITTED: jam(); + case SchemaFile::TEMPORARY_TABLE_COMMITTED: jam(); + continue; + case SchemaFile::ADD_STARTED: jam(); + case SchemaFile::DROP_TABLE_STARTED: jam(); +#ifdef PRINT_SCHEMA_RESTART + ndbout_c("%s -> restartDropTab", buf); + ndbout << *newEntry << " " << *oldEntry << endl; +#endif + restartDropTab(signal, tableId, oldEntry, newEntry); + return; + case SchemaFile::TABLE_ADD_COMMITTED: jam(); + case SchemaFile::ALTER_TABLE_COMMITTED: jam(); + if (! 
(* oldEntry == * newEntry)) { - restartCreateTab(signal, tableId, oldEntry, newEntry, false); +#ifdef PRINT_SCHEMA_RESTART + ndbout_c("%s -> restartDropTab", buf); + ndbout << *newEntry << " " << *oldEntry << endl; +#endif + restartDropTab(signal, tableId, oldEntry, newEntry); return; } - break; - } - ndbrequire(ok); - break; - } - case SchemaFile::DROP_TABLE_STARTED: - jam(); - case SchemaFile::DROP_TABLE_COMMITTED:{ - jam(); - bool ok = false; - switch(oldSchemaState){ - case SchemaFile::INIT: - jam(); - case SchemaFile::DROP_TABLE_COMMITTED: - jam(); - ok = true; - break; - case SchemaFile::ADD_STARTED: - jam(); - case SchemaFile::TABLE_ADD_COMMITTED: - jam(); - case SchemaFile::DROP_TABLE_STARTED: - jam(); - case SchemaFile::ALTER_TABLE_COMMITTED: - jam(); - newEntry->m_tableState = SchemaFile::INIT; - restartDropTab(signal, tableId); - return; - case SchemaFile::TEMPORARY_TABLE_COMMITTED: - jam(); - ok = true; - newEntry->m_tableState = SchemaFile::INIT; - break; + continue; } - ndbrequire(ok); - break; } - case SchemaFile::ALTER_TABLE_COMMITTED: { - jam(); - bool ok = false; - switch(oldSchemaState) { - case SchemaFile::INIT: - jam(); - case SchemaFile::ADD_STARTED: - jam(); - case SchemaFile::DROP_TABLE_STARTED: - jam(); - case SchemaFile::DROP_TABLE_COMMITTED: - jam(); - case SchemaFile::TEMPORARY_TABLE_COMMITTED: - jam(); - ok = true; - if(!c_systemRestart) + + if (c_restartRecord.m_pass <= CREATE_NEW_PASS) + { + if (!::checkSchemaStatus(newEntry->m_tableType, c_restartRecord.m_pass)) + continue; + + switch(newSchemaState){ + case SchemaFile::INIT: jam(); + case SchemaFile::DROP_TABLE_COMMITTED: jam(); + case SchemaFile::TEMPORARY_TABLE_COMMITTED: jam(); + * oldEntry = * newEntry; + continue; + case SchemaFile::ADD_STARTED: jam(); + case SchemaFile::DROP_TABLE_STARTED: jam(); + ndbrequire(DictTabInfo::isTable(newEntry->m_tableType) || + DictTabInfo::isIndex(newEntry->m_tableType)); + newEntry->m_tableState = SchemaFile::INIT; + continue; + case SchemaFile::TABLE_ADD_COMMITTED: jam(); + case SchemaFile::ALTER_TABLE_COMMITTED: jam(); + if (DictTabInfo::isIndex(newEntry->m_tableType) || + DictTabInfo::isTable(newEntry->m_tableType)) { - restartCreateTab(signal, tableId, oldEntry, newEntry, false); + bool file = * oldEntry == *newEntry && + (!DictTabInfo::isIndex(newEntry->m_tableType) || c_systemRestart); + +#ifdef PRINT_SCHEMA_RESTART + ndbout_c("%s -> restartCreateTab (file: %d)", buf, file); + ndbout << *newEntry << " " << *oldEntry << endl; +#endif + restartCreateTab(signal, tableId, newEntry, newEntry, file); + * oldEntry = * newEntry; return; } - break; - case SchemaFile::TABLE_ADD_COMMITTED: - jam(); - ok = true; - //------------------------------------------------------------------ - // Table was altered in the master node but not in our node. We can - // retrieve the altered table definition from the master. - //------------------------------------------------------------------ - restartCreateTab(signal, tableId, oldEntry, newEntry, false); - return; - break; - case SchemaFile::ALTER_TABLE_COMMITTED: - jam(); - ok = true; - - //------------------------------------------------------------------ - // Table was altered in both our node and the master node. We can - // retrieve the table definition from our own disk. 
- //------------------------------------------------------------------ - - // On NR get index from master because index state is not on file - Uint32 type= oldEntry->m_tableType; - const bool file = (* newEntry == * oldEntry) && - (c_systemRestart || !DictTabInfo::isIndex(type)); - newEntry->m_info_words= oldEntry->m_info_words; - restartCreateTab(signal, tableId, oldEntry, newEntry, file); - return; - } - ndbrequire(ok); - break; - } - case SchemaFile::TEMPORARY_TABLE_COMMITTED: { - jam(); - bool ok = false; - switch(oldSchemaState){ - case SchemaFile::INIT: - jam(); - case SchemaFile::DROP_TABLE_COMMITTED: - jam(); - case SchemaFile::ADD_STARTED: - jam(); - case SchemaFile::TABLE_ADD_COMMITTED: - jam(); - case SchemaFile::DROP_TABLE_STARTED: - jam(); - case SchemaFile::ALTER_TABLE_COMMITTED: - jam(); - case SchemaFile::TEMPORARY_TABLE_COMMITTED: - jam(); - ok = true; - if(!c_systemRestart) + else if (! (* oldEntry == *newEntry)) { - restartCreateTab(signal, tableId, oldEntry, newEntry, false); +#ifdef PRINT_SCHEMA_RESTART + ndbout_c("%s -> restartCreateTab", buf); + ndbout << *newEntry << " " << *oldEntry << endl; +#endif + restartCreateTab(signal, tableId, oldEntry, newEntry, false); + * oldEntry = * newEntry; return; - } else { - newEntry->m_tableState = SchemaFile::INIT; - } - break; + } + * oldEntry = * newEntry; + continue; } - ndbrequire(ok); - break; - } } } c_restartRecord.m_pass++; c_restartRecord.activeTable= 0; - if(c_restartRecord.m_pass <= 4) + if(c_restartRecord.m_pass <= LAST_PASS) { checkSchemaStatus(signal); } @@ -3307,7 +3223,31 @@ Dbdict::releaseCreateTableOp(Signal* signal, CreateTableRecordPtr createTabPtr) } void -Dbdict::restartDropTab(Signal* signal, Uint32 tableId){ +Dbdict::restartDropTab(Signal* signal, Uint32 tableId, + const SchemaFile::TableEntry * old_entry, + const SchemaFile::TableEntry * new_entry) +{ + switch(old_entry->m_tableType){ + case DictTabInfo::UndefTableType: + case DictTabInfo::HashIndexTrigger: + case DictTabInfo::SubscriptionTrigger: + case DictTabInfo::ReadOnlyConstraint: + case DictTabInfo::IndexTrigger: + ndbrequire(false); + case DictTabInfo::SystemTable: + case DictTabInfo::UserTable: + case DictTabInfo::UniqueHashIndex: + case DictTabInfo::HashIndex: + case DictTabInfo::UniqueOrderedIndex: + case DictTabInfo::OrderedIndex: + break; + case DictTabInfo::Tablespace: + case DictTabInfo::LogfileGroup: + case DictTabInfo::Datafile: + case DictTabInfo::Undofile: + restartDropObj(signal, tableId, old_entry); + return; + } const Uint32 key = ++c_opRecordSequence; @@ -3341,12 +3281,16 @@ Dbdict::restartDropTab_complete(Signal* signal, //@todo check error + releaseTableObject(c_restartRecord.activeTable); c_opDropTable.release(dropTabPtr); c_restartRecord.activeTable++; checkSchemaStatus(signal); } +/** + * Create Obj during NR/SR + */ void Dbdict::restartCreateObj(Signal* signal, Uint32 tableId, @@ -3575,6 +3519,170 @@ Dbdict::restartCreateObj_commit_complete_done(Signal* signal, checkSchemaStatus(signal); } +/** + * Drop object during NR/SR + */ +void +Dbdict::restartDropObj(Signal* signal, + Uint32 tableId, + const SchemaFile::TableEntry * entry) +{ + jam(); + + DropObjRecordPtr dropObjPtr; + ndbrequire(c_opDropObj.seize(dropObjPtr)); + + const Uint32 key = ++c_opRecordSequence; + dropObjPtr.p->key = key; + c_opDropObj.add(dropObjPtr); + dropObjPtr.p->m_errorCode = 0; + dropObjPtr.p->m_senderRef = reference(); + dropObjPtr.p->m_senderData = tableId; + dropObjPtr.p->m_clientRef = reference(); + dropObjPtr.p->m_clientData = tableId; + + 
dropObjPtr.p->m_obj_id = tableId; + dropObjPtr.p->m_obj_type = entry->m_tableType; + dropObjPtr.p->m_obj_version = entry->m_tableVersion; + + dropObjPtr.p->m_callback.m_callbackData = key; + dropObjPtr.p->m_callback.m_callbackFunction= + safe_cast(&Dbdict::restartDropObj_prepare_start_done); + + ndbout_c("Dropping %d %d", tableId, entry->m_tableType); + switch(entry->m_tableType){ + case DictTabInfo::Tablespace: + case DictTabInfo::LogfileGroup:{ + jam(); + Ptr<Filegroup> fg_ptr; + ndbrequire(c_filegroup_hash.find(fg_ptr, tableId)); + dropObjPtr.p->m_obj_ptr_i = fg_ptr.i; + dropObjPtr.p->m_vt_index = 3; + break; + } + case DictTabInfo::Datafile:{ + jam(); + Ptr<File> file_ptr; + dropObjPtr.p->m_vt_index = 2; + ndbrequire(c_file_hash.find(file_ptr, tableId)); + dropObjPtr.p->m_obj_ptr_i = file_ptr.i; + break; + } + case DictTabInfo::Undofile:{ + jam(); + Ptr<File> file_ptr; + dropObjPtr.p->m_vt_index = 4; + ndbrequire(c_file_hash.find(file_ptr, tableId)); + dropObjPtr.p->m_obj_ptr_i = file_ptr.i; + + /** + * Undofiles are only removed from logfile groups file list + * as drop undofile is currently not supported... + * file will be dropped by lgman when dropping filegroup + */ + dropObjPtr.p->m_callback.m_callbackFunction= + safe_cast(&Dbdict::restartDropObj_commit_complete_done); + + if (f_dict_op[dropObjPtr.p->m_vt_index].m_commit_complete) + (this->*f_dict_op[dropObjPtr.p->m_vt_index].m_commit_complete) + (signal, dropObjPtr.p); + else + execute(signal, dropObjPtr.p->m_callback, 0); + return; + } + default: + jamLine(entry->m_tableType); + ndbrequire(false); + } + + if (f_dict_op[dropObjPtr.p->m_vt_index].m_prepare_start) + (this->*f_dict_op[dropObjPtr.p->m_vt_index].m_prepare_start) + (signal, dropObjPtr.p); + else + execute(signal, dropObjPtr.p->m_callback, 0); +} + +void +Dbdict::restartDropObj_prepare_start_done(Signal* signal, + Uint32 callbackData, + Uint32 returnCode) +{ + jam(); + ndbrequire(returnCode == 0); + DropObjRecordPtr dropObjPtr; + ndbrequire(c_opDropObj.find(dropObjPtr, callbackData)); + ndbrequire(dropObjPtr.p->m_errorCode == 0); + + dropObjPtr.p->m_callback.m_callbackFunction = + safe_cast(&Dbdict::restartDropObj_prepare_complete_done); + + if (f_dict_op[dropObjPtr.p->m_vt_index].m_prepare_complete) + (this->*f_dict_op[dropObjPtr.p->m_vt_index].m_prepare_complete) + (signal, dropObjPtr.p); + else + execute(signal, dropObjPtr.p->m_callback, 0); +} + +void +Dbdict::restartDropObj_prepare_complete_done(Signal* signal, + Uint32 callbackData, + Uint32 returnCode) +{ + jam(); + ndbrequire(returnCode == 0); + DropObjRecordPtr dropObjPtr; + ndbrequire(c_opDropObj.find(dropObjPtr, callbackData)); + ndbrequire(dropObjPtr.p->m_errorCode == 0); + + dropObjPtr.p->m_callback.m_callbackFunction = + safe_cast(&Dbdict::restartDropObj_commit_start_done); + + if (f_dict_op[dropObjPtr.p->m_vt_index].m_commit_start) + (this->*f_dict_op[dropObjPtr.p->m_vt_index].m_commit_start) + (signal, dropObjPtr.p); + else + execute(signal, dropObjPtr.p->m_callback, 0); +} + +void +Dbdict::restartDropObj_commit_start_done(Signal* signal, + Uint32 callbackData, + Uint32 returnCode) +{ + jam(); + ndbrequire(returnCode == 0); + DropObjRecordPtr dropObjPtr; + ndbrequire(c_opDropObj.find(dropObjPtr, callbackData)); + ndbrequire(dropObjPtr.p->m_errorCode == 0); + + dropObjPtr.p->m_callback.m_callbackFunction = + safe_cast(&Dbdict::restartDropObj_commit_complete_done); + + if (f_dict_op[dropObjPtr.p->m_vt_index].m_commit_complete) + (this->*f_dict_op[dropObjPtr.p->m_vt_index].m_commit_complete) + (signal, 
dropObjPtr.p); + else + execute(signal, dropObjPtr.p->m_callback, 0); +} + + +void +Dbdict::restartDropObj_commit_complete_done(Signal* signal, + Uint32 callbackData, + Uint32 returnCode) +{ + jam(); + ndbrequire(returnCode == 0); + DropObjRecordPtr dropObjPtr; + ndbrequire(c_opDropObj.find(dropObjPtr, callbackData)); + ndbrequire(dropObjPtr.p->m_errorCode == 0); + + c_opDropObj.release(dropObjPtr); + + c_restartRecord.activeTable++; + checkSchemaStatus(signal); +} + /* **************************************************************** */ /* ---------------------------------------------------------------- */ /* MODULE: NODE FAILURE HANDLING ------------------------- */ @@ -3808,7 +3916,7 @@ Dbdict::execCREATE_TABLE_REQ(Signal* signal){ createTabPtr.p->m_dihAddFragPtr = RNIL; Uint32 key = c_opRecordSequence + 1; - Uint32 *theData = signal->getDataPtrSend(), i; + Uint32 *theData = signal->getDataPtrSend(); Uint16 *frag_data= (Uint16*)&signal->theData[25]; CreateFragmentationReq * const req = (CreateFragmentationReq*)theData; req->senderRef = reference(); @@ -4962,7 +5070,6 @@ Dbdict::execCREATE_FRAGMENTATION_CONF(Signal* signal){ packTableIntoPages(w, tabPtr); SegmentedSectionPtr spDataPtr; - Ptr<SectionSegment> tmpTsPtr; w.getPtr(spDataPtr); signal->setSection(spDataPtr, CreateTabReq::DICT_TAB_INFO); @@ -5447,7 +5554,6 @@ Dbdict::execADD_FRAGREQ(Signal* signal) { Uint32 fragCount = req->totalFragments; Uint32 requestInfo = req->requestInfo; Uint32 startGci = req->startGci; - Uint32 tablespace_id= req->tablespaceId; Uint32 logPart = req->logPartId; ndbrequire(node == getOwnNodeId()); @@ -5710,7 +5816,9 @@ Dbdict::execTAB_COMMITCONF(Signal* signal){ signal->theData[4] = (Uint32)tabPtr.p->tableType; signal->theData[5] = createTabPtr.p->key; signal->theData[6] = (Uint32)tabPtr.p->noOfPrimkey; - sendSignal(DBTC_REF, GSN_TC_SCHVERREQ, signal, 7, JBB); + signal->theData[7] = (Uint32)tabPtr.p->singleUserMode; + + sendSignal(DBTC_REF, GSN_TC_SCHVERREQ, signal, 8, JBB); return; } @@ -6142,12 +6250,8 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it, tablePtr.p->minRowsHigh = c_tableDesc.MinRowsHigh; tablePtr.p->defaultNoPartFlag = c_tableDesc.DefaultNoPartFlag; tablePtr.p->linearHashFlag = c_tableDesc.LinearHashFlag; + tablePtr.p->singleUserMode = c_tableDesc.SingleUserMode; - Uint64 maxRows = - (((Uint64)tablePtr.p->maxRowsHigh) << 32) + tablePtr.p->maxRowsLow; - Uint64 minRows = - (((Uint64)tablePtr.p->minRowsHigh) << 32) + tablePtr.p->minRowsLow; - { Rope frm(c_rope_pool, tablePtr.p->frmData); tabRequire(frm.assign(c_tableDesc.FrmData, c_tableDesc.FrmLen), @@ -7574,7 +7678,6 @@ void Dbdict::execLIST_TABLES_REQ(Signal* signal) { jamEntry(); - Uint32 i; ListTablesReq * req = (ListTablesReq*)signal->getDataPtr(); Uint32 senderRef = req->senderRef; Uint32 senderData = req->senderData; @@ -8173,6 +8276,7 @@ Dbdict::createIndex_toCreateTable(Signal* signal, OpCreateIndexPtr opPtr) w.add(DictTabInfo::NoOfKeyAttr, indexPtr.p->noOfPrimkey); w.add(DictTabInfo::NoOfNullable, indexPtr.p->noOfNullAttr); w.add(DictTabInfo::KeyLength, indexPtr.p->tupKeyLength); + w.add(DictTabInfo::SingleUserMode, (Uint32)NDB_SUM_READ_WRITE); // write index key attributes for (k = 0; k < opPtr.p->m_attrList.sz; k++) { // insert the attributes in the order decided above in attrid_map @@ -9461,7 +9565,6 @@ Dbdict::createEventUTIL_PREPARE(Signal* signal, evntRecPtr.i = ref->getSenderData(); ndbrequire((evntRecPtr.p = c_opCreateEvent.getPtr(evntRecPtr.i)) != NULL); - Uint32 err; interpretUtilPrepareErrorCode(errorCode, 
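restartDropObj() above walks a fixed four-stage pipeline, with each *_done callback installing the next stage and execute(signal, callback, 0) short-circuiting any stage whose per-object-type hook is absent. Collapsed to a synchronous sketch (in the real block each stage resumes asynchronously through a safe_cast callback; names here are illustrative):

#include <cstdio>

typedef void (*StageHook)(void);

/* Run the four drop stages in order. A null hook means "no work for this
   object type", so the stage completes at once -- the role the
   execute(..., 0) fallback plays above. */
static void run_drop_pipeline(StageHook hooks[4])
{
  static const char *const names[4] =
    { "prepare_start", "prepare_complete", "commit_start", "commit_complete" };
  for (int i = 0; i < 4; i++)
  {
    std::printf("stage: %s\n", names[i]);
    if (hooks[i])
      hooks[i]();   /* block-specific work for this stage */
  }
}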
evntRecPtr.p->m_errorCode, evntRecPtr.p->m_errorLine); evntRecPtr.p->m_errorNode = reference(); @@ -14066,7 +14169,8 @@ Dbdict::getTableEntry(XSchemaFile * xsf, Uint32 tableId) //****************************************** void -Dbdict::execCREATE_FILE_REQ(Signal* signal){ +Dbdict::execCREATE_FILE_REQ(Signal* signal) +{ jamEntry(); if(!assembleFragments(signal)){ @@ -14100,15 +14204,25 @@ Dbdict::execCREATE_FILE_REQ(Signal* signal){ break; } + if (checkSingleUserMode(senderRef)) + { + ref->errorCode = CreateFileRef::SingleUser; + ref->status = 0; + ref->errorKey = 0; + ref->errorLine = __LINE__; + break; + } + Ptr<SchemaTransaction> trans_ptr; if (! c_Trans.seize(trans_ptr)){ + jam(); ref->errorCode = CreateFileRef::Busy; ref->status = 0; ref->errorKey = 0; ref->errorLine = __LINE__; break; } - + jam(); const Uint32 trans_key = ++c_opRecordSequence; trans_ptr.p->key = trans_key; trans_ptr.p->m_senderRef = senderRef; @@ -14137,6 +14251,7 @@ Dbdict::execCREATE_FILE_REQ(Signal* signal){ { Uint32 objId = getFreeObjId(0); if (objId == RNIL) { + jam(); ref->errorCode = CreateFileRef::NoMoreObjectRecords; ref->status = 0; ref->errorKey = 0; @@ -14161,7 +14276,6 @@ Dbdict::execCREATE_FILE_REQ(Signal* signal){ CreateObjReq::SignalLength, JBB); c_blockState = BS_CREATE_TAB; - return; } while(0); @@ -14172,7 +14286,8 @@ Dbdict::execCREATE_FILE_REQ(Signal* signal){ } void -Dbdict::execCREATE_FILEGROUP_REQ(Signal* signal){ +Dbdict::execCREATE_FILEGROUP_REQ(Signal* signal) +{ jamEntry(); if(!assembleFragments(signal)){ @@ -14205,15 +14320,25 @@ Dbdict::execCREATE_FILEGROUP_REQ(Signal* signal){ break; } + if (checkSingleUserMode(senderRef)) + { + ref->errorCode = CreateFilegroupRef::SingleUser; + ref->status = 0; + ref->errorKey = 0; + ref->errorLine = __LINE__; + break; + } + Ptr<SchemaTransaction> trans_ptr; if (! c_Trans.seize(trans_ptr)){ + jam(); ref->errorCode = CreateFilegroupRef::Busy; ref->status = 0; ref->errorKey = 0; ref->errorLine = __LINE__; break; } - + jam(); const Uint32 trans_key = ++c_opRecordSequence; trans_ptr.p->key = trans_key; trans_ptr.p->m_senderRef = senderRef; @@ -14239,6 +14364,7 @@ Dbdict::execCREATE_FILEGROUP_REQ(Signal* signal){ { Uint32 objId = getFreeObjId(0); if (objId == RNIL) { + jam(); ref->errorCode = CreateFilegroupRef::NoMoreObjectRecords; ref->status = 0; ref->errorKey = 0; @@ -14263,7 +14389,6 @@ Dbdict::execCREATE_FILEGROUP_REQ(Signal* signal){ CreateObjReq::SignalLength, JBB); c_blockState = BS_CREATE_TAB; - return; } while(0); @@ -14299,7 +14424,8 @@ Dbdict::execDROP_FILE_REQ(Signal* signal) break; } - if (c_blockState != BS_IDLE){ + if (c_blockState != BS_IDLE) + { jam(); ref->errorCode = DropFileRef::Busy; ref->errorKey = 0; @@ -14307,9 +14433,19 @@ Dbdict::execDROP_FILE_REQ(Signal* signal) break; } + if (checkSingleUserMode(senderRef)) + { + jam(); + ref->errorCode = DropFileRef::SingleUser; + ref->errorKey = 0; + ref->errorLine = __LINE__; + break; + } + Ptr<File> file_ptr; if (!c_file_hash.find(file_ptr, objId)) { + jam(); ref->errorCode = DropFileRef::NoSuchFile; ref->errorLine = __LINE__; break; @@ -14317,6 +14453,7 @@ Dbdict::execDROP_FILE_REQ(Signal* signal) if (file_ptr.p->m_version != version) { + jam(); ref->errorCode = DropFileRef::InvalidSchemaObjectVersion; ref->errorLine = __LINE__; break; @@ -14325,10 +14462,12 @@ Dbdict::execDROP_FILE_REQ(Signal* signal) Ptr<SchemaTransaction> trans_ptr; if (! 
c_Trans.seize(trans_ptr)) { - ref->errorCode = CreateFileRef::Busy; + jam(); + ref->errorCode = DropFileRef::Busy; ref->errorLine = __LINE__; break; } + jam(); const Uint32 trans_key = ++c_opRecordSequence; trans_ptr.p->key = trans_key; @@ -14364,7 +14503,6 @@ Dbdict::execDROP_FILE_REQ(Signal* signal) DropObjReq::SignalLength, JBB); c_blockState = BS_CREATE_TAB; - return; } while(0); @@ -14392,7 +14530,8 @@ Dbdict::execDROP_FILEGROUP_REQ(Signal* signal) Uint32 version = req->filegroup_version; do { - if(getOwnNodeId() != c_masterNodeId){ + if(getOwnNodeId() != c_masterNodeId) + { jam(); ref->errorCode = DropFilegroupRef::NotMaster; ref->errorKey = 0; @@ -14400,7 +14539,8 @@ Dbdict::execDROP_FILEGROUP_REQ(Signal* signal) break; } - if (c_blockState != BS_IDLE){ + if (c_blockState != BS_IDLE) + { jam(); ref->errorCode = DropFilegroupRef::Busy; ref->errorKey = 0; @@ -14408,9 +14548,19 @@ Dbdict::execDROP_FILEGROUP_REQ(Signal* signal) break; } + if (checkSingleUserMode(senderRef)) + { + jam(); + ref->errorCode = DropFilegroupRef::SingleUser; + ref->errorKey = 0; + ref->errorLine = __LINE__; + break; + } + Ptr<Filegroup> filegroup_ptr; if (!c_filegroup_hash.find(filegroup_ptr, objId)) { + jam(); ref->errorCode = DropFilegroupRef::NoSuchFilegroup; ref->errorLine = __LINE__; break; @@ -14418,6 +14568,7 @@ Dbdict::execDROP_FILEGROUP_REQ(Signal* signal) if (filegroup_ptr.p->m_version != version) { + jam(); ref->errorCode = DropFilegroupRef::InvalidSchemaObjectVersion; ref->errorLine = __LINE__; break; @@ -14426,10 +14577,12 @@ Dbdict::execDROP_FILEGROUP_REQ(Signal* signal) Ptr<SchemaTransaction> trans_ptr; if (! c_Trans.seize(trans_ptr)) { - ref->errorCode = CreateFilegroupRef::Busy; + jam(); + ref->errorCode = DropFilegroupRef::Busy; ref->errorLine = __LINE__; break; } + jam(); const Uint32 trans_key = ++c_opRecordSequence; trans_ptr.p->key = trans_key; @@ -14465,7 +14618,6 @@ Dbdict::execDROP_FILEGROUP_REQ(Signal* signal) DropObjReq::SignalLength, JBB); c_blockState = BS_CREATE_TAB; - return; } while(0); @@ -14476,15 +14628,15 @@ Dbdict::execDROP_FILEGROUP_REQ(Signal* signal) } void -Dbdict::execCREATE_OBJ_REF(Signal* signal){ - jamEntry(); - +Dbdict::execCREATE_OBJ_REF(Signal* signal) +{ CreateObjRef * const ref = (CreateObjRef*)signal->getDataPtr(); - Ptr<SchemaTransaction> trans_ptr; + + jamEntry(); ndbrequire(c_Trans.find(trans_ptr, ref->senderData)); - if(ref->errorCode != CreateObjRef::NF_FakeErrorREF){ + jam(); trans_ptr.p->setErrorCode(ref->errorCode); } Uint32 node = refToNode(ref->senderRef); @@ -14492,12 +14644,12 @@ Dbdict::execCREATE_OBJ_REF(Signal* signal){ } void -Dbdict::execCREATE_OBJ_CONF(Signal* signal){ - jamEntry(); - - CreateObjConf * const conf = (CreateObjConf*)signal->getDataPtr(); - +Dbdict::execCREATE_OBJ_CONF(Signal* signal) +{ Ptr<SchemaTransaction> trans_ptr; + CreateObjConf * const conf = (CreateObjConf*)signal->getDataPtr(); + + jamEntry(); ndbrequire(c_Trans.find(trans_ptr, conf->senderData)); schemaOp_reply(signal, trans_ptr.p, refToNode(conf->senderRef)); } @@ -14507,6 +14659,7 @@ Dbdict::schemaOp_reply(Signal* signal, SchemaTransaction * trans_ptr_p, Uint32 nodeId) { + jam(); { SafeCounter tmp(c_counterMgr, trans_ptr_p->m_counter); if(!tmp.clearWaitingFor(nodeId)){ @@ -14517,10 +14670,8 @@ Dbdict::schemaOp_reply(Signal* signal, switch(trans_ptr_p->m_op.m_state){ case DictObjOp::Preparing:{ - if(trans_ptr_p->m_errorCode != 0) { - jam(); /** * Failed to prepare on atleast one node -> abort on all */ @@ -14530,10 +14681,16 @@ Dbdict::schemaOp_reply(Signal* signal, 
safe_cast(&Dbdict::trans_abort_start_done); if(f_dict_op[trans_ptr_p->m_op.m_vt_index].m_trans_abort_start) + { + jam(); (this->*f_dict_op[trans_ptr_p->m_op.m_vt_index].m_trans_abort_start) (signal, trans_ptr_p); + } else + { + jam(); execute(signal, trans_ptr_p->m_callback, 0); + } return; } @@ -14543,14 +14700,19 @@ Dbdict::schemaOp_reply(Signal* signal, safe_cast(&Dbdict::trans_commit_start_done); if(f_dict_op[trans_ptr_p->m_op.m_vt_index].m_trans_commit_start) + { + jam(); (this->*f_dict_op[trans_ptr_p->m_op.m_vt_index].m_trans_commit_start) (signal, trans_ptr_p); + } else + { + jam(); execute(signal, trans_ptr_p->m_callback, 0); + } return; } case DictObjOp::Committing: { - jam(); ndbrequire(trans_ptr_p->m_errorCode == 0); trans_ptr_p->m_op.m_state = DictObjOp::Committed; @@ -14559,31 +14721,42 @@ Dbdict::schemaOp_reply(Signal* signal, safe_cast(&Dbdict::trans_commit_complete_done); if(f_dict_op[trans_ptr_p->m_op.m_vt_index].m_trans_commit_complete) + { + jam(); (this->*f_dict_op[trans_ptr_p->m_op.m_vt_index].m_trans_commit_complete) (signal, trans_ptr_p); + } else - execute(signal, trans_ptr_p->m_callback, 0); + { + jam(); + execute(signal, trans_ptr_p->m_callback, 0); + } return; } case DictObjOp::Aborting:{ - jam(); - trans_ptr_p->m_op.m_state = DictObjOp::Committed; trans_ptr_p->m_callback.m_callbackData = trans_ptr_p->key; trans_ptr_p->m_callback.m_callbackFunction= safe_cast(&Dbdict::trans_abort_complete_done); if(f_dict_op[trans_ptr_p->m_op.m_vt_index].m_trans_abort_complete) + { + jam(); (this->*f_dict_op[trans_ptr_p->m_op.m_vt_index].m_trans_abort_complete) (signal, trans_ptr_p); + } else - execute(signal, trans_ptr_p->m_callback, 0); + { + jam(); + execute(signal, trans_ptr_p->m_callback, 0); + } return; } case DictObjOp::Defined: case DictObjOp::Prepared: case DictObjOp::Committed: case DictObjOp::Aborted: + jam(); break; } ndbrequire(false); @@ -14592,14 +14765,13 @@ Dbdict::schemaOp_reply(Signal* signal, void Dbdict::trans_commit_start_done(Signal* signal, Uint32 callbackData, - Uint32 retValue){ - jamEntry(); - - ndbrequire(retValue == 0); - + Uint32 retValue) +{ Ptr<SchemaTransaction> trans_ptr; + + jam(); + ndbrequire(retValue == 0); ndbrequire(c_Trans.find(trans_ptr, callbackData)); - NodeReceiverGroup rg(DBDICT, trans_ptr.p->m_nodes); SafeCounter tmp(c_counterMgr, trans_ptr.p->m_counter); tmp.init<DictCommitRef>(rg, GSN_DICT_COMMIT_REF, trans_ptr.p->key); @@ -14610,27 +14782,26 @@ Dbdict::trans_commit_start_done(Signal* signal, req->op_key = trans_ptr.p->m_op.m_key; sendSignal(rg, GSN_DICT_COMMIT_REQ, signal, DictCommitReq::SignalLength, JBB); - trans_ptr.p->m_op.m_state = DictObjOp::Committing; } void Dbdict::trans_commit_complete_done(Signal* signal, Uint32 callbackData, - Uint32 retValue){ - jamEntry(); - - ndbrequire(retValue == 0); - + Uint32 retValue) +{ Ptr<SchemaTransaction> trans_ptr; + + jam(); + ndbrequire(retValue == 0); ndbrequire(c_Trans.find(trans_ptr, callbackData)); switch(f_dict_op[trans_ptr.p->m_op.m_vt_index].m_gsn_user_req){ case GSN_CREATE_FILEGROUP_REQ:{ FilegroupPtr fg_ptr; + jam(); ndbrequire(c_filegroup_hash.find(fg_ptr, trans_ptr.p->m_op.m_obj_id)); - // CreateFilegroupConf * conf = (CreateFilegroupConf*)signal->getDataPtr(); conf->senderRef = reference(); conf->senderData = trans_ptr.p->m_senderData; @@ -14640,11 +14811,11 @@ Dbdict::trans_commit_complete_done(Signal* signal, //@todo check api failed sendSignal(trans_ptr.p->m_senderRef, GSN_CREATE_FILEGROUP_CONF, signal, CreateFilegroupConf::SignalLength, JBB); - break; } case 
GSN_CREATE_FILE_REQ:{ FilePtr f_ptr; + jam(); ndbrequire(c_file_hash.find(f_ptr, trans_ptr.p->m_op.m_obj_id)); CreateFileConf * conf = (CreateFileConf*)signal->getDataPtr(); conf->senderRef = reference(); @@ -14655,11 +14826,11 @@ Dbdict::trans_commit_complete_done(Signal* signal, //@todo check api failed sendSignal(trans_ptr.p->m_senderRef, GSN_CREATE_FILE_CONF, signal, CreateFileConf::SignalLength, JBB); - break; } case GSN_DROP_FILE_REQ:{ DropFileConf * conf = (DropFileConf*)signal->getDataPtr(); + jam(); conf->senderRef = reference(); conf->senderData = trans_ptr.p->m_senderData; conf->fileId = trans_ptr.p->m_op.m_obj_id; @@ -14671,6 +14842,7 @@ Dbdict::trans_commit_complete_done(Signal* signal, } case GSN_DROP_FILEGROUP_REQ:{ DropFilegroupConf * conf = (DropFilegroupConf*)signal->getDataPtr(); + jam(); conf->senderRef = reference(); conf->senderData = trans_ptr.p->m_senderData; conf->filegroupId = trans_ptr.p->m_op.m_obj_id; @@ -14693,12 +14865,12 @@ Dbdict::trans_commit_complete_done(Signal* signal, void Dbdict::trans_abort_start_done(Signal* signal, Uint32 callbackData, - Uint32 retValue){ - jamEntry(); - - ndbrequire(retValue == 0); - + Uint32 retValue) +{ Ptr<SchemaTransaction> trans_ptr; + + jam(); + ndbrequire(retValue == 0); ndbrequire(c_Trans.find(trans_ptr, callbackData)); NodeReceiverGroup rg(DBDICT, trans_ptr.p->m_nodes); @@ -14716,12 +14888,12 @@ Dbdict::trans_abort_start_done(Signal* signal, void Dbdict::trans_abort_complete_done(Signal* signal, Uint32 callbackData, - Uint32 retValue){ - jamEntry(); - - ndbrequire(retValue == 0); - + Uint32 retValue) +{ Ptr<SchemaTransaction> trans_ptr; + + jam(); + ndbrequire(retValue == 0); ndbrequire(c_Trans.find(trans_ptr, callbackData)); switch(f_dict_op[trans_ptr.p->m_op.m_vt_index].m_gsn_user_req){ @@ -14729,6 +14901,7 @@ Dbdict::trans_abort_complete_done(Signal* signal, { // CreateFilegroupRef * ref = (CreateFilegroupRef*)signal->getDataPtr(); + jam(); ref->senderRef = reference(); ref->senderData = trans_ptr.p->m_senderData; ref->masterNodeId = c_masterNodeId; @@ -14740,12 +14913,12 @@ Dbdict::trans_abort_complete_done(Signal* signal, //@todo check api failed sendSignal(trans_ptr.p->m_senderRef, GSN_CREATE_FILEGROUP_REF, signal, CreateFilegroupRef::SignalLength, JBB); - break; } case GSN_CREATE_FILE_REQ: { CreateFileRef * ref = (CreateFileRef*)signal->getDataPtr(); + jam(); ref->senderRef = reference(); ref->senderData = trans_ptr.p->m_senderData; ref->masterNodeId = c_masterNodeId; @@ -14757,12 +14930,12 @@ Dbdict::trans_abort_complete_done(Signal* signal, //@todo check api failed sendSignal(trans_ptr.p->m_senderRef, GSN_CREATE_FILE_REF, signal, CreateFileRef::SignalLength, JBB); - break; } case GSN_DROP_FILE_REQ: { DropFileRef * ref = (DropFileRef*)signal->getDataPtr(); + jam(); ref->senderRef = reference(); ref->senderData = trans_ptr.p->m_senderData; ref->masterNodeId = c_masterNodeId; @@ -14773,13 +14946,13 @@ Dbdict::trans_abort_complete_done(Signal* signal, //@todo check api failed sendSignal(trans_ptr.p->m_senderRef, GSN_DROP_FILE_REF, signal, DropFileRef::SignalLength, JBB); - break; } case GSN_DROP_FILEGROUP_REQ: { // DropFilegroupRef * ref = (DropFilegroupRef*)signal->getDataPtr(); + jam(); ref->senderRef = reference(); ref->senderData = trans_ptr.p->m_senderData; ref->masterNodeId = c_masterNodeId; @@ -14790,7 +14963,6 @@ Dbdict::trans_abort_complete_done(Signal* signal, //@todo check api failed sendSignal(trans_ptr.p->m_senderRef, GSN_DROP_FILEGROUP_REF, signal, DropFilegroupRef::SignalLength, JBB); - break; } 
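/* Each arm above returns the operation-specific REF signal to the original requestor once the abort has completed on all participating nodes. */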
default: @@ -14804,7 +14976,8 @@ Dbdict::trans_abort_complete_done(Signal* signal, } void -Dbdict::execCREATE_OBJ_REQ(Signal* signal){ +Dbdict::execCREATE_OBJ_REQ(Signal* signal) +{ jamEntry(); if(!assembleFragments(signal)){ @@ -14849,6 +15022,7 @@ Dbdict::execCREATE_OBJ_REQ(Signal* signal){ switch(objType){ case DictTabInfo::Tablespace: case DictTabInfo::LogfileGroup: + jam(); createObjPtr.p->m_vt_index = 0; break; case DictTabInfo::Datafile: @@ -14857,7 +15031,11 @@ Dbdict::execCREATE_OBJ_REQ(Signal* signal){ * Use restart code to impl. ForceCreateFile */ if (requestInfo & CreateFileReq::ForceCreateFile) - createObjPtr.p->m_restart= 2; + { + jam(); + createObjPtr.p->m_restart= 2; + } + jam(); createObjPtr.p->m_vt_index = 1; break; default: @@ -14873,10 +15051,10 @@ void Dbdict::execDICT_COMMIT_REQ(Signal* signal) { DictCommitReq* req = (DictCommitReq*)signal->getDataPtr(); - Ptr<SchemaOp> op; - ndbrequire(c_schemaOp.find(op, req->op_key)); + jamEntry(); + ndbrequire(c_schemaOp.find(op, req->op_key)); (this->*f_dict_op[op.p->m_vt_index].m_commit)(signal, op.p); } @@ -14884,23 +15062,23 @@ void Dbdict::execDICT_ABORT_REQ(Signal* signal) { DictAbortReq* req = (DictAbortReq*)signal->getDataPtr(); - Ptr<SchemaOp> op; - ndbrequire(c_schemaOp.find(op, req->op_key)); + jamEntry(); + ndbrequire(c_schemaOp.find(op, req->op_key)); (this->*f_dict_op[op.p->m_vt_index].m_abort)(signal, op.p); } void -Dbdict::execDICT_COMMIT_REF(Signal* signal){ - jamEntry(); - +Dbdict::execDICT_COMMIT_REF(Signal* signal) +{ DictCommitRef * const ref = (DictCommitRef*)signal->getDataPtr(); - Ptr<SchemaTransaction> trans_ptr; + + jamEntry(); ndbrequire(c_Trans.find(trans_ptr, ref->senderData)); - if(ref->errorCode != DictCommitRef::NF_FakeErrorREF){ + jam(); trans_ptr.p->setErrorCode(ref->errorCode); } Uint32 node = refToNode(ref->senderRef); @@ -14908,26 +15086,26 @@ Dbdict::execDICT_COMMIT_REF(Signal* signal){ } void -Dbdict::execDICT_COMMIT_CONF(Signal* signal){ - jamEntry(); - - DictCommitConf * const conf = (DictCommitConf*)signal->getDataPtr(); - +Dbdict::execDICT_COMMIT_CONF(Signal* signal) +{ Ptr<SchemaTransaction> trans_ptr; + DictCommitConf * const conf = (DictCommitConf*)signal->getDataPtr(); + + jamEntry(); ndbrequire(c_Trans.find(trans_ptr, conf->senderData)); schemaOp_reply(signal, trans_ptr.p, refToNode(conf->senderRef)); } void -Dbdict::execDICT_ABORT_REF(Signal* signal){ - jamEntry(); - +Dbdict::execDICT_ABORT_REF(Signal* signal) +{ DictAbortRef * const ref = (DictAbortRef*)signal->getDataPtr(); - Ptr<SchemaTransaction> trans_ptr; + + jamEntry(); ndbrequire(c_Trans.find(trans_ptr, ref->senderData)); - if(ref->errorCode != DictAbortRef::NF_FakeErrorREF){ + jam(); trans_ptr.p->setErrorCode(ref->errorCode); } Uint32 node = refToNode(ref->senderRef); @@ -14935,31 +15113,28 @@ Dbdict::execDICT_ABORT_REF(Signal* signal){ } void -Dbdict::execDICT_ABORT_CONF(Signal* signal){ - jamEntry(); - +Dbdict::execDICT_ABORT_CONF(Signal* signal) +{ DictAbortConf * const conf = (DictAbortConf*)signal->getDataPtr(); - Ptr<SchemaTransaction> trans_ptr; + + jamEntry(); ndbrequire(c_Trans.find(trans_ptr, conf->senderData)); schemaOp_reply(signal, trans_ptr.p, refToNode(conf->senderRef)); } - - void Dbdict::createObj_prepare_start_done(Signal* signal, Uint32 callbackData, - Uint32 returnCode){ + Uint32 returnCode) +{ + CreateObjRecordPtr createObjPtr; + SegmentedSectionPtr objInfoPtr; ndbrequire(returnCode == 0); - - CreateObjRecordPtr createObjPtr; ndbrequire(c_opCreateObj.find(createObjPtr, callbackData)); - - 
SegmentedSectionPtr objInfoPtr; + jam(); getSection(objInfoPtr, createObjPtr.p->m_obj_info_ptr_i); - if(createObjPtr.p->m_errorCode != 0){ jam(); createObjPtr.p->m_obj_info_ptr_i= RNIL; @@ -14987,19 +15162,19 @@ Dbdict::createObj_prepare_start_done(Signal* signal, void Dbdict::createObj_writeSchemaConf1(Signal* signal, Uint32 callbackData, - Uint32 returnCode){ - jam(); + Uint32 returnCode) +{ + CreateObjRecordPtr createObjPtr; + Callback callback; + SegmentedSectionPtr objInfoPtr; + jam(); ndbrequire(returnCode == 0); - - CreateObjRecordPtr createObjPtr; ndbrequire(c_opCreateObj.find(createObjPtr, callbackData)); - Callback callback; callback.m_callbackData = createObjPtr.p->key; callback.m_callbackFunction = safe_cast(&Dbdict::createObj_writeObjConf); - SegmentedSectionPtr objInfoPtr; getSection(objInfoPtr, createObjPtr.p->m_obj_info_ptr_i); writeTableFile(signal, createObjPtr.p->m_obj_id, objInfoPtr, &callback); @@ -15011,14 +15186,13 @@ Dbdict::createObj_writeSchemaConf1(Signal* signal, void Dbdict::createObj_writeObjConf(Signal* signal, Uint32 callbackData, - Uint32 returnCode){ - jam(); + Uint32 returnCode) +{ + CreateObjRecordPtr createObjPtr; + jam(); ndbrequire(returnCode == 0); - - CreateObjRecordPtr createObjPtr; ndbrequire(c_opCreateObj.find(createObjPtr, callbackData)); - createObjPtr.p->m_callback.m_callbackFunction = safe_cast(&Dbdict::createObj_prepare_complete_done); (this->*f_dict_op[createObjPtr.p->m_vt_index].m_prepare_complete) @@ -15028,12 +15202,12 @@ Dbdict::createObj_writeObjConf(Signal* signal, void Dbdict::createObj_prepare_complete_done(Signal* signal, Uint32 callbackData, - Uint32 returnCode){ + Uint32 returnCode) +{ + CreateObjRecordPtr createObjPtr; + jam(); - ndbrequire(returnCode == 0); - - CreateObjRecordPtr createObjPtr; ndbrequire(c_opCreateObj.find(createObjPtr, callbackData)); //@todo check for master failed @@ -15062,28 +15236,33 @@ Dbdict::createObj_prepare_complete_done(Signal* signal, } void -Dbdict::createObj_commit(Signal * signal, SchemaOp * op){ - jam(); - +Dbdict::createObj_commit(Signal * signal, SchemaOp * op) +{ OpCreateObj * createObj = (OpCreateObj*)op; + createObj->m_callback.m_callbackFunction = safe_cast(&Dbdict::createObj_commit_start_done); if (f_dict_op[createObj->m_vt_index].m_commit_start) + { + jam(); (this->*f_dict_op[createObj->m_vt_index].m_commit_start)(signal, createObj); + } else + { + jam(); execute(signal, createObj->m_callback, 0); + } } void Dbdict::createObj_commit_start_done(Signal* signal, Uint32 callbackData, - Uint32 returnCode){ + Uint32 returnCode) +{ + CreateObjRecordPtr createObjPtr; jam(); - ndbrequire(returnCode == 0); - - CreateObjRecordPtr createObjPtr; ndbrequire(c_opCreateObj.find(createObjPtr, callbackData)); Uint32 objId = createObjPtr.p->m_obj_id; @@ -15103,29 +15282,35 @@ Dbdict::createObj_commit_start_done(Signal* signal, void Dbdict::createObj_writeSchemaConf2(Signal* signal, Uint32 callbackData, - Uint32 returnCode){ - jam(); - - CreateObjRecordPtr createObjPtr; - ndbrequire(c_opCreateObj.find(createObjPtr, callbackData)); + Uint32 returnCode) +{ + CreateObjRecordPtr createObjPtr; + ndbrequire(c_opCreateObj.find(createObjPtr, callbackData)); createObjPtr.p->m_callback.m_callbackFunction = safe_cast(&Dbdict::createObj_commit_complete_done); if (f_dict_op[createObjPtr.p->m_vt_index].m_commit_complete) + { + jam(); (this->*f_dict_op[createObjPtr.p->m_vt_index].m_commit_complete) (signal, createObjPtr.p); + } else + { + jam(); execute(signal, createObjPtr.p->m_callback, 0); + } } void 
Dbdict::createObj_commit_complete_done(Signal* signal, Uint32 callbackData, - Uint32 returnCode){ + Uint32 returnCode) +{ + CreateObjRecordPtr createObjPtr; + jam(); - - CreateObjRecordPtr createObjPtr; ndbrequire(c_opCreateObj.find(createObjPtr, callbackData)); //@todo check error @@ -15143,27 +15328,31 @@ Dbdict::createObj_commit_complete_done(Signal* signal, void Dbdict::createObj_abort(Signal* signal, SchemaOp* op) { - jam(); - OpCreateObj * createObj = (OpCreateObj*)op; createObj->m_callback.m_callbackFunction = safe_cast(&Dbdict::createObj_abort_start_done); if (f_dict_op[createObj->m_vt_index].m_abort_start) + { + jam(); (this->*f_dict_op[createObj->m_vt_index].m_abort_start)(signal, createObj); + } else + { + jam(); execute(signal, createObj->m_callback, 0); + } } void Dbdict::createObj_abort_start_done(Signal* signal, Uint32 callbackData, - Uint32 returnCode){ + Uint32 returnCode) +{ + CreateObjRecordPtr createObjPtr; + jam(); - - CreateObjRecordPtr createObjPtr; ndbrequire(c_opCreateObj.find(createObjPtr, callbackData)); - XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0]; SchemaFile::TableEntry objEntry = * getTableEntry(xsf, createObjPtr.p->m_obj_id); @@ -15182,19 +15371,23 @@ Dbdict::createObj_abort_writeSchemaConf(Signal* signal, Uint32 callbackData, Uint32 returnCode) { - jam(); + CreateObjRecordPtr createObjPtr; - CreateObjRecordPtr createObjPtr; ndbrequire(c_opCreateObj.find(createObjPtr, callbackData)); - createObjPtr.p->m_callback.m_callbackFunction = safe_cast(&Dbdict::createObj_abort_complete_done); if (f_dict_op[createObjPtr.p->m_vt_index].m_abort_complete) + { + jam(); (this->*f_dict_op[createObjPtr.p->m_vt_index].m_abort_complete) (signal, createObjPtr.p); + } else + { + jam(); execute(signal, createObjPtr.p->m_callback, 0); + } } void @@ -15202,9 +15395,9 @@ Dbdict::createObj_abort_complete_done(Signal* signal, Uint32 callbackData, Uint32 returnCode) { - jam(); + CreateObjRecordPtr createObjPtr; - CreateObjRecordPtr createObjPtr; + jam(); ndbrequire(c_opCreateObj.find(createObjPtr, callbackData)); DictAbortConf * const conf = (DictAbortConf*)signal->getDataPtr(); @@ -15217,7 +15410,8 @@ Dbdict::createObj_abort_complete_done(Signal* signal, } void -Dbdict::execDROP_OBJ_REQ(Signal* signal){ +Dbdict::execDROP_OBJ_REQ(Signal* signal) +{ jamEntry(); if(!assembleFragments(signal)){ @@ -15230,7 +15424,6 @@ Dbdict::execDROP_OBJ_REQ(Signal* signal){ const Uint32 objId = req->objId; const Uint32 objVersion = req->objVersion; const Uint32 objType = req->objType; - const Uint32 requestInfo = req->requestInfo; DropObjRecordPtr dropObjPtr; ndbrequire(c_opDropObj.seize(dropObjPtr)); @@ -15256,8 +15449,9 @@ Dbdict::execDROP_OBJ_REQ(Signal* signal){ case DictTabInfo::Tablespace: case DictTabInfo::LogfileGroup: { - dropObjPtr.p->m_vt_index = 3; Ptr<Filegroup> fg_ptr; + jam(); + dropObjPtr.p->m_vt_index = 3; ndbrequire(c_filegroup_hash.find(fg_ptr, objId)); dropObjPtr.p->m_obj_ptr_i = fg_ptr.i; break; @@ -15265,15 +15459,19 @@ Dbdict::execDROP_OBJ_REQ(Signal* signal){ } case DictTabInfo::Datafile: { - dropObjPtr.p->m_vt_index = 2; Ptr<File> file_ptr; + jam(); + dropObjPtr.p->m_vt_index = 2; ndbrequire(c_file_hash.find(file_ptr, objId)); dropObjPtr.p->m_obj_ptr_i = file_ptr.i; break; } case DictTabInfo::Undofile: + { + jam(); dropObjPtr.p->m_vt_index = 4; return; + } default: ndbrequire(false); } @@ -15288,12 +15486,12 @@ Dbdict::dropObj_prepare_start_done(Signal* signal, Uint32 callbackData, Uint32 returnCode) { - ndbrequire(returnCode == 0); + DropObjRecordPtr 
dropObjPtr; + Callback cb; - DropObjRecordPtr dropObjPtr; + ndbrequire(returnCode == 0); ndbrequire(c_opDropObj.find(dropObjPtr, callbackData)); - Callback cb; cb.m_callbackData = callbackData; cb.m_callbackFunction = safe_cast(&Dbdict::dropObj_prepare_writeSchemaConf); @@ -15304,7 +15502,7 @@ Dbdict::dropObj_prepare_start_done(Signal* signal, dropObj_prepare_complete_done(signal, callbackData, 0); return; } - + jam(); Uint32 objId = dropObjPtr.p->m_obj_id; XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0]; SchemaFile::TableEntry objEntry = *getTableEntry(xsf, objId); @@ -15317,19 +15515,23 @@ Dbdict::dropObj_prepare_writeSchemaConf(Signal* signal, Uint32 callbackData, Uint32 returnCode) { - ndbrequire(returnCode == 0); + DropObjRecordPtr dropObjPtr; - DropObjRecordPtr dropObjPtr; + ndbrequire(returnCode == 0); ndbrequire(c_opDropObj.find(dropObjPtr, callbackData)); - dropObjPtr.p->m_callback.m_callbackFunction = safe_cast(&Dbdict::dropObj_prepare_complete_done); - if(f_dict_op[dropObjPtr.p->m_vt_index].m_prepare_complete) + { + jam(); (this->*f_dict_op[dropObjPtr.p->m_vt_index].m_prepare_complete) (signal, dropObjPtr.p); + } else + { + jam(); execute(signal, dropObjPtr.p->m_callback, 0); + } } void @@ -15337,10 +15539,11 @@ Dbdict::dropObj_prepare_complete_done(Signal* signal, Uint32 callbackData, Uint32 returnCode) { + DropObjRecordPtr dropObjPtr; + ndbrequire(returnCode == 0); - - DropObjRecordPtr dropObjPtr; ndbrequire(c_opDropObj.find(dropObjPtr, callbackData)); + jam(); //@todo check for master failed @@ -15366,16 +15569,22 @@ Dbdict::dropObj_prepare_complete_done(Signal* signal, } void -Dbdict::dropObj_commit(Signal * signal, SchemaOp * op){ - jam(); - +Dbdict::dropObj_commit(Signal * signal, SchemaOp * op) +{ OpDropObj * dropObj = (OpDropObj*)op; + dropObj->m_callback.m_callbackFunction = safe_cast(&Dbdict::dropObj_commit_start_done); if (f_dict_op[dropObj->m_vt_index].m_commit_start) + { + jam(); (this->*f_dict_op[dropObj->m_vt_index].m_commit_start)(signal, dropObj); + } else + { + jam(); execute(signal, dropObj->m_callback, 0); + } } void @@ -15383,10 +15592,10 @@ Dbdict::dropObj_commit_start_done(Signal* signal, Uint32 callbackData, Uint32 returnCode) { + DropObjRecordPtr dropObjPtr; + jam(); ndbrequire(returnCode == 0); - - DropObjRecordPtr dropObjPtr; ndbrequire(c_opDropObj.find(dropObjPtr, callbackData)); Uint32 objId = dropObjPtr.p->m_obj_id; @@ -15407,20 +15616,25 @@ Dbdict::dropObj_commit_writeSchemaConf(Signal* signal, Uint32 callbackData, Uint32 returnCode) { + DropObjRecordPtr dropObjPtr; + jam(); ndbrequire(returnCode == 0); - - DropObjRecordPtr dropObjPtr; ndbrequire(c_opDropObj.find(dropObjPtr, callbackData)); - dropObjPtr.p->m_callback.m_callbackFunction = safe_cast(&Dbdict::dropObj_commit_complete_done); if(f_dict_op[dropObjPtr.p->m_vt_index].m_commit_complete) + { + jam(); (this->*f_dict_op[dropObjPtr.p->m_vt_index].m_commit_complete) (signal, dropObjPtr.p); + } else + { + jam(); execute(signal, dropObjPtr.p->m_callback, 0); + } } void @@ -15428,7 +15642,9 @@ Dbdict::dropObj_commit_complete_done(Signal* signal, Uint32 callbackData, Uint32 returnCode) { - DropObjRecordPtr dropObjPtr; + DropObjRecordPtr dropObjPtr; + + jam(); ndbrequire(c_opDropObj.find(dropObjPtr, callbackData)); //@todo check error @@ -15439,22 +15655,26 @@ Dbdict::dropObj_commit_complete_done(Signal* signal, conf->senderData = dropObjPtr.p->m_senderData; sendSignal(dropObjPtr.p->m_senderRef, GSN_DICT_COMMIT_CONF, signal, DictCommitConf::SignalLength, JBB); - 
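/* The commit has been confirmed to the coordinator with DICT_COMMIT_CONF, so the drop-object operation record can safely be released. */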
c_opDropObj.release(dropObjPtr); } void -Dbdict::dropObj_abort(Signal * signal, SchemaOp * op){ - jam(); - +Dbdict::dropObj_abort(Signal * signal, SchemaOp * op) +{ OpDropObj * dropObj = (OpDropObj*)op; + dropObj->m_callback.m_callbackFunction = safe_cast(&Dbdict::dropObj_abort_start_done); - if (f_dict_op[dropObj->m_vt_index].m_abort_start) + { + jam(); (this->*f_dict_op[dropObj->m_vt_index].m_abort_start)(signal, dropObj); + } else + { + jam(); execute(signal, dropObj->m_callback, 0); + } } void @@ -15462,10 +15682,10 @@ Dbdict::dropObj_abort_start_done(Signal* signal, Uint32 callbackData, Uint32 returnCode) { + DropObjRecordPtr dropObjPtr; + jam(); ndbrequire(returnCode == 0); - - DropObjRecordPtr dropObjPtr; ndbrequire(c_opDropObj.find(dropObjPtr, callbackData)); XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0]; @@ -15486,6 +15706,7 @@ Dbdict::dropObj_abort_start_done(Signal* signal, } else { + jam(); execute(signal, callback, 0); } } @@ -15495,20 +15716,24 @@ Dbdict::dropObj_abort_writeSchemaConf(Signal* signal, Uint32 callbackData, Uint32 returnCode) { - jam(); + DropObjRecordPtr dropObjPtr; + ndbrequire(returnCode == 0); - - DropObjRecordPtr dropObjPtr; ndbrequire(c_opDropObj.find(dropObjPtr, callbackData)); - dropObjPtr.p->m_callback.m_callbackFunction = safe_cast(&Dbdict::dropObj_abort_complete_done); if(f_dict_op[dropObjPtr.p->m_vt_index].m_abort_complete) + { + jam(); (this->*f_dict_op[dropObjPtr.p->m_vt_index].m_abort_complete) (signal, dropObjPtr.p); + } else + { + jam(); execute(signal, dropObjPtr.p->m_callback, 0); + } } void @@ -15516,24 +15741,26 @@ Dbdict::dropObj_abort_complete_done(Signal* signal, Uint32 callbackData, Uint32 returnCode) { - DropObjRecordPtr dropObjPtr; - ndbrequire(c_opDropObj.find(dropObjPtr, callbackData)); - + DropObjRecordPtr dropObjPtr; DictAbortConf * const conf = (DictAbortConf*)signal->getDataPtr(); + + ndbrequire(c_opDropObj.find(dropObjPtr, callbackData)); + jam(); conf->senderRef = reference(); conf->senderData = dropObjPtr.p->m_senderData; sendSignal(dropObjPtr.p->m_senderRef, GSN_DICT_ABORT_CONF, signal, DictAbortConf::SignalLength, JBB); - c_opDropObj.release(dropObjPtr); } void -Dbdict::create_fg_prepare_start(Signal* signal, SchemaOp* op){ +Dbdict::create_fg_prepare_start(Signal* signal, SchemaOp* op) +{ /** * Put data into table record */ SegmentedSectionPtr objInfoPtr; + jam(); getSection(objInfoPtr, ((OpCreateObj*)op)->m_obj_info_ptr_i); SimplePropertiesSectionReader it(objInfoPtr, getSectionSegmentPool()); @@ -15550,6 +15777,7 @@ Dbdict::create_fg_prepare_start(Signal* signal, SchemaOp* op){ if(status != SimpleProperties::Eof) { + jam(); op->m_errorCode = CreateTableRef::InvalidFormat; break; } @@ -15558,14 +15786,19 @@ Dbdict::create_fg_prepare_start(Signal* signal, SchemaOp* op){ { if(!fg.TS_ExtentSize) { + jam(); op->m_errorCode = CreateFilegroupRef::InvalidExtentSize; break; } } else if(fg.FilegroupType == DictTabInfo::LogfileGroup) { - if(!fg.LF_UndoBufferSize) + /** + * undo_buffer_size can't be less than 96KB in LGMAN block + */ + if(fg.LF_UndoBufferSize < 3 * File_formats::NDB_PAGE_SIZE) { + jam(); op->m_errorCode = CreateFilegroupRef::InvalidUndoBufferSize; break; } @@ -15574,16 +15807,19 @@ Dbdict::create_fg_prepare_start(Signal* signal, SchemaOp* op){ Uint32 len = strlen(fg.FilegroupName) + 1; Uint32 hash = Rope::hash(fg.FilegroupName, len); if(get_object(fg.FilegroupName, len, hash) != 0){ + jam(); op->m_errorCode = CreateTableRef::TableAlreadyExist; break; } if(!c_obj_pool.seize(obj_ptr)){ + jam(); 
op->m_errorCode = CreateTableRef::NoMoreTableRecords; break; } if(!c_filegroup_pool.seize(fg_ptr)){ + jam(); op->m_errorCode = CreateTableRef::NoMoreTableRecords; break; } @@ -15593,6 +15829,7 @@ Dbdict::create_fg_prepare_start(Signal* signal, SchemaOp* op){ { Rope name(c_rope_pool, obj_ptr.p->m_name); if(!name.assign(fg.FilegroupName, len, hash)){ + jam(); op->m_errorCode = CreateTableRef::OutOfStringBuffer; break; } @@ -15606,6 +15843,7 @@ Dbdict::create_fg_prepare_start(Signal* signal, SchemaOp* op){ switch(fg.FilegroupType){ case DictTabInfo::Tablespace: + { //fg.TS_DataGrow = group.m_grow_spec; fg_ptr.p->m_tablespace.m_extent_size = fg.TS_ExtentSize; fg_ptr.p->m_tablespace.m_default_logfile_group_id = fg.TS_LogfileGroupId; @@ -15613,22 +15851,28 @@ Dbdict::create_fg_prepare_start(Signal* signal, SchemaOp* op){ Ptr<Filegroup> lg_ptr; if (!c_filegroup_hash.find(lg_ptr, fg.TS_LogfileGroupId)) { + jam(); op->m_errorCode = CreateFilegroupRef::NoSuchLogfileGroup; goto error; } if (lg_ptr.p->m_version != fg.TS_LogfileGroupVersion) { + jam(); op->m_errorCode = CreateFilegroupRef::InvalidFilegroupVersion; goto error; } increase_ref_count(lg_ptr.p->m_obj_ptr_i); break; + } case DictTabInfo::LogfileGroup: + { + jam(); fg_ptr.p->m_logfilegroup.m_undo_buffer_size = fg.LF_UndoBufferSize; fg_ptr.p->m_logfilegroup.m_files.init(); //fg.LF_UndoGrow = ; break; + } default: ndbrequire(false); } @@ -15663,13 +15907,14 @@ error: } void -Dbdict::create_fg_prepare_complete(Signal* signal, SchemaOp* op){ +Dbdict::create_fg_prepare_complete(Signal* signal, SchemaOp* op) +{ /** * CONTACT TSMAN LGMAN PGMAN */ CreateFilegroupImplReq* req = (CreateFilegroupImplReq*)signal->getDataPtrSend(); - + jam(); req->senderData = op->key; req->senderRef = reference(); req->filegroup_id = op->m_obj_id; @@ -15682,18 +15927,24 @@ Dbdict::create_fg_prepare_complete(Signal* signal, SchemaOp* op){ Uint32 len= 0; switch(op->m_obj_type){ case DictTabInfo::Tablespace: + { + jam(); ref = TSMAN_REF; len = CreateFilegroupImplReq::TablespaceLength; req->tablespace.extent_size = fg_ptr.p->m_tablespace.m_extent_size; req->tablespace.logfile_group_id = fg_ptr.p->m_tablespace.m_default_logfile_group_id; break; + } case DictTabInfo::LogfileGroup: + { + jam(); ref = LGMAN_REF; len = CreateFilegroupImplReq::LogfileGroupLength; req->logfile_group.buffer_size = fg_ptr.p->m_logfilegroup.m_undo_buffer_size; break; + } default: ndbrequire(false); } @@ -15702,12 +15953,11 @@ Dbdict::create_fg_prepare_complete(Signal* signal, SchemaOp* op){ } void -Dbdict::execCREATE_FILEGROUP_REF(Signal* signal){ - jamEntry(); - +Dbdict::execCREATE_FILEGROUP_REF(Signal* signal) +{ CreateFilegroupImplRef * ref = (CreateFilegroupImplRef*)signal->getDataPtr(); - CreateObjRecordPtr op_ptr; + jamEntry(); ndbrequire(c_opCreateObj.find(op_ptr, ref->senderData)); op_ptr.p->m_errorCode = ref->errorCode; @@ -15715,13 +15965,12 @@ Dbdict::execCREATE_FILEGROUP_REF(Signal* signal){ } void -Dbdict::execCREATE_FILEGROUP_CONF(Signal* signal){ - jamEntry(); - +Dbdict::execCREATE_FILEGROUP_CONF(Signal* signal) +{ CreateFilegroupImplConf * rep = (CreateFilegroupImplConf*)signal->getDataPtr(); - CreateObjRecordPtr op_ptr; + jamEntry(); ndbrequire(c_opCreateObj.find(op_ptr, rep->senderData)); execute(signal, op_ptr.p->m_callback, 0); @@ -15729,8 +15978,7 @@ Dbdict::execCREATE_FILEGROUP_CONF(Signal* signal){ void Dbdict::create_fg_abort_start(Signal* signal, SchemaOp* op){ - CreateFilegroupImplReq* req = - (CreateFilegroupImplReq*)signal->getDataPtrSend(); + (void) 
signal->getDataPtrSend(); if (op->m_obj_ptr_i != RNIL) { @@ -15738,13 +15986,13 @@ Dbdict::create_fg_abort_start(Signal* signal, SchemaOp* op){ send_drop_fg(signal, op, DropFilegroupImplReq::Commit); return; } - + jam(); execute(signal, op->m_callback, 0); } void -Dbdict::create_fg_abort_complete(Signal* signal, SchemaOp* op){ - +Dbdict::create_fg_abort_complete(Signal* signal, SchemaOp* op) +{ if (op->m_obj_ptr_i != RNIL) { jam(); @@ -15754,12 +16002,13 @@ Dbdict::create_fg_abort_complete(Signal* signal, SchemaOp* op){ release_object(fg_ptr.p->m_obj_ptr_i); c_filegroup_hash.release(fg_ptr); } - + jam(); execute(signal, op->m_callback, 0); } void -Dbdict::create_file_prepare_start(Signal* signal, SchemaOp* op){ +Dbdict::create_file_prepare_start(Signal* signal, SchemaOp* op) +{ /** * Put data into table record */ @@ -15779,6 +16028,7 @@ Dbdict::create_file_prepare_start(Signal* signal, SchemaOp* op){ do { if(status != SimpleProperties::Eof){ + jam(); op->m_errorCode = CreateFileRef::InvalidFormat; break; } @@ -15786,34 +16036,53 @@ Dbdict::create_file_prepare_start(Signal* signal, SchemaOp* op){ // Get Filegroup FilegroupPtr fg_ptr; if(!c_filegroup_hash.find(fg_ptr, f.FilegroupId)){ + jam(); op->m_errorCode = CreateFileRef::NoSuchFilegroup; break; } if(fg_ptr.p->m_version != f.FilegroupVersion){ + jam(); op->m_errorCode = CreateFileRef::InvalidFilegroupVersion; break; } switch(f.FileType){ case DictTabInfo::Datafile: + { if(fg_ptr.p->m_type != DictTabInfo::Tablespace) + { + jam(); op->m_errorCode = CreateFileRef::InvalidFileType; + } + jam(); break; + } case DictTabInfo::Undofile: + { if(fg_ptr.p->m_type != DictTabInfo::LogfileGroup) + { + jam(); op->m_errorCode = CreateFileRef::InvalidFileType; + } + jam(); break; + } default: + jam(); op->m_errorCode = CreateFileRef::InvalidFileType; } if(op->m_errorCode) + { + jam(); break; + } Uint32 len = strlen(f.FileName) + 1; Uint32 hash = Rope::hash(f.FileName, len); if(get_object(f.FileName, len, hash) != 0){ + jam(); op->m_errorCode = CreateFileRef::FilenameAlreadyExists; break; } @@ -15824,6 +16093,7 @@ Dbdict::create_file_prepare_start(Signal* signal, SchemaOp* op){ m_ctx.m_config.getOwnConfigIterator(); if(!ndb_mgm_get_int_parameter(p, CFG_DB_DISCLESS, &dl) && dl) { + jam(); op->m_errorCode = CreateFileRef::NotSupportedWhenDiskless; break; } @@ -15831,11 +16101,13 @@ Dbdict::create_file_prepare_start(Signal* signal, SchemaOp* op){ // Loop through all filenames... if(!c_obj_pool.seize(obj_ptr)){ + jam(); op->m_errorCode = CreateTableRef::NoMoreTableRecords; break; } if (! 
c_file_pool.seize(filePtr)){ + jam(); op->m_errorCode = CreateFileRef::OutOfFileRecords; break; } @@ -15845,6 +16117,7 @@ Dbdict::create_file_prepare_start(Signal* signal, SchemaOp* op){ { Rope name(c_rope_pool, obj_ptr.p->m_name); if(!name.assign(f.FileName, len, hash)){ + jam(); op->m_errorCode = CreateTableRef::OutOfStringBuffer; break; } @@ -15852,10 +16125,14 @@ Dbdict::create_file_prepare_start(Signal* signal, SchemaOp* op){ switch(fg_ptr.p->m_type){ case DictTabInfo::Tablespace: + { + jam(); increase_ref_count(fg_ptr.p->m_obj_ptr_i); break; + } case DictTabInfo::LogfileGroup: { + jam(); Local_file_list list(c_file_pool, fg_ptr.p->m_logfilegroup.m_files); list.add(filePtr); break; @@ -15899,37 +16176,46 @@ Dbdict::create_file_prepare_start(Signal* signal, SchemaOp* op){ c_obj_pool.release(obj_ptr); } } - execute(signal, op->m_callback, 0); } void -Dbdict::create_file_prepare_complete(Signal* signal, SchemaOp* op){ +Dbdict::create_file_prepare_complete(Signal* signal, SchemaOp* op) +{ /** * CONTACT TSMAN LGMAN PGMAN */ CreateFileImplReq* req = (CreateFileImplReq*)signal->getDataPtrSend(); - FilePtr f_ptr; - c_file_pool.getPtr(f_ptr, op->m_obj_ptr_i); - FilegroupPtr fg_ptr; + + jam(); + c_file_pool.getPtr(f_ptr, op->m_obj_ptr_i); ndbrequire(c_filegroup_hash.find(fg_ptr, f_ptr.p->m_filegroup_id)); req->senderData = op->key; req->senderRef = reference(); switch(((OpCreateObj*)op)->m_restart){ case 0: + { + jam(); req->requestInfo = CreateFileImplReq::Create; break; + } case 1: + { + jam(); req->requestInfo = CreateFileImplReq::Open; break; + } case 2: + { + jam(); req->requestInfo = CreateFileImplReq::CreateForce; break; } + } req->file_id = f_ptr.p->key; req->filegroup_id = f_ptr.p->m_filegroup_id; @@ -15941,14 +16227,20 @@ Dbdict::create_file_prepare_complete(Signal* signal, SchemaOp* op){ Uint32 len= 0; switch(op->m_obj_type){ case DictTabInfo::Datafile: + { + jam(); ref = TSMAN_REF; len = CreateFileImplReq::DatafileLength; req->tablespace.extent_size = fg_ptr.p->m_tablespace.m_extent_size; break; + } case DictTabInfo::Undofile: + { + jam(); ref = LGMAN_REF; len = CreateFileImplReq::UndofileLength; break; + } default: ndbrequire(false); } @@ -15963,42 +16255,41 @@ Dbdict::create_file_prepare_complete(Signal* signal, SchemaOp* op){ } void -Dbdict::execCREATE_FILE_REF(Signal* signal){ - jamEntry(); - +Dbdict::execCREATE_FILE_REF(Signal* signal) +{ CreateFileImplRef * ref = (CreateFileImplRef*)signal->getDataPtr(); - CreateObjRecordPtr op_ptr; + + jamEntry(); ndbrequire(c_opCreateObj.find(op_ptr, ref->senderData)); op_ptr.p->m_errorCode = ref->errorCode; - execute(signal, op_ptr.p->m_callback, 0); } void -Dbdict::execCREATE_FILE_CONF(Signal* signal){ - jamEntry(); - +Dbdict::execCREATE_FILE_CONF(Signal* signal) +{ CreateFileImplConf * rep = (CreateFileImplConf*)signal->getDataPtr(); - CreateObjRecordPtr op_ptr; + + jamEntry(); ndbrequire(c_opCreateObj.find(op_ptr, rep->senderData)); - execute(signal, op_ptr.p->m_callback, 0); } void -Dbdict::create_file_commit_start(Signal* signal, SchemaOp* op){ +Dbdict::create_file_commit_start(Signal* signal, SchemaOp* op) +{ /** * CONTACT TSMAN LGMAN PGMAN */ CreateFileImplReq* req = (CreateFileImplReq*)signal->getDataPtrSend(); - FilePtr f_ptr; - c_file_pool.getPtr(f_ptr, op->m_obj_ptr_i); - FilegroupPtr fg_ptr; + + jam(); + c_file_pool.getPtr(f_ptr, op->m_obj_ptr_i); ndbrequire(c_filegroup_hash.find(fg_ptr, f_ptr.p->m_filegroup_id)); req->senderData = op->key; @@ -16012,15 +16303,20 @@ Dbdict::create_file_commit_start(Signal* signal, SchemaOp* op){ 
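/* Route the commit request to the block that owns this file type: TSMAN for datafiles, LGMAN for undofiles. */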
Uint32 ref= 0; switch(op->m_obj_type){ case DictTabInfo::Datafile: + { + jam(); ref = TSMAN_REF; break; + } case DictTabInfo::Undofile: + { + jam(); ref = LGMAN_REF; break; + } default: ndbrequire(false); } - sendSignal(ref, GSN_CREATE_FILE_REQ, signal, CreateFileImplReq::CommitLength, JBB); } @@ -16033,9 +16329,11 @@ Dbdict::create_file_abort_start(Signal* signal, SchemaOp* op) if (op->m_obj_ptr_i != RNIL) { FilePtr f_ptr; + FilegroupPtr fg_ptr; + + jam(); c_file_pool.getPtr(f_ptr, op->m_obj_ptr_i); - FilegroupPtr fg_ptr; ndbrequire(c_filegroup_hash.find(fg_ptr, f_ptr.p->m_filegroup_id)); req->senderData = op->key; @@ -16049,20 +16347,24 @@ Dbdict::create_file_abort_start(Signal* signal, SchemaOp* op) Uint32 ref= 0; switch(op->m_obj_type){ case DictTabInfo::Datafile: + { + jam(); ref = TSMAN_REF; break; + } case DictTabInfo::Undofile: + { + jam(); ref = LGMAN_REF; break; + } default: ndbrequire(false); } - sendSignal(ref, GSN_CREATE_FILE_REQ, signal, CreateFileImplReq::AbortLength, JBB); return; } - execute(signal, op->m_callback, 0); } @@ -16072,17 +16374,21 @@ Dbdict::create_file_abort_complete(Signal* signal, SchemaOp* op) if (op->m_obj_ptr_i != RNIL) { FilePtr f_ptr; - c_file_pool.getPtr(f_ptr, op->m_obj_ptr_i); - FilegroupPtr fg_ptr; + + jam(); + c_file_pool.getPtr(f_ptr, op->m_obj_ptr_i); ndbrequire(c_filegroup_hash.find(fg_ptr, f_ptr.p->m_filegroup_id)); - switch(fg_ptr.p->m_type){ case DictTabInfo::Tablespace: + { + jam(); decrease_ref_count(fg_ptr.p->m_obj_ptr_i); break; + } case DictTabInfo::LogfileGroup: { + jam(); Local_file_list list(c_file_pool, fg_ptr.p->m_logfilegroup.m_files); list.remove(f_ptr); break; @@ -16094,19 +16400,20 @@ Dbdict::create_file_abort_complete(Signal* signal, SchemaOp* op) release_object(f_ptr.p->m_obj_ptr_i); c_file_hash.release(f_ptr); } - execute(signal, op->m_callback, 0); } void Dbdict::drop_file_prepare_start(Signal* signal, SchemaOp* op) { + jam(); send_drop_file(signal, op, DropFileImplReq::Prepare); } void Dbdict::drop_undofile_prepare_start(Signal* signal, SchemaOp* op) { + jam(); op->m_errorCode = DropFileRef::DropUndoFileNotSupported; execute(signal, op->m_callback, 0); } @@ -16114,6 +16421,7 @@ Dbdict::drop_undofile_prepare_start(Signal* signal, SchemaOp* op) void Dbdict::drop_file_commit_start(Signal* signal, SchemaOp* op) { + jam(); send_drop_file(signal, op, DropFileImplReq::Commit); } @@ -16121,21 +16429,37 @@ void Dbdict::drop_file_commit_complete(Signal* signal, SchemaOp* op) { FilePtr f_ptr; - c_file_pool.getPtr(f_ptr, op->m_obj_ptr_i); - FilegroupPtr fg_ptr; - ndbrequire(c_filegroup_hash.find(fg_ptr, f_ptr.p->m_filegroup_id)); + jam(); + c_file_pool.getPtr(f_ptr, op->m_obj_ptr_i); + ndbrequire(c_filegroup_hash.find(fg_ptr, f_ptr.p->m_filegroup_id)); decrease_ref_count(fg_ptr.p->m_obj_ptr_i); release_object(f_ptr.p->m_obj_ptr_i); c_file_hash.release(f_ptr); + execute(signal, op->m_callback, 0); +} +void +Dbdict::drop_undofile_commit_complete(Signal* signal, SchemaOp* op) +{ + FilePtr f_ptr; + FilegroupPtr fg_ptr; + + jam(); + c_file_pool.getPtr(f_ptr, op->m_obj_ptr_i); + ndbrequire(c_filegroup_hash.find(fg_ptr, f_ptr.p->m_filegroup_id)); + Local_file_list list(c_file_pool, fg_ptr.p->m_logfilegroup.m_files); + list.remove(f_ptr); + release_object(f_ptr.p->m_obj_ptr_i); + c_file_hash.release(f_ptr); execute(signal, op->m_callback, 0); } void Dbdict::drop_file_abort_start(Signal* signal, SchemaOp* op) { + jam(); send_drop_file(signal, op, DropFileImplReq::Abort); } @@ -16144,11 +16468,11 @@ Dbdict::send_drop_file(Signal* signal, 
SchemaOp* op, DropFileImplReq::RequestInfo type) { DropFileImplReq* req = (DropFileImplReq*)signal->getDataPtrSend(); - FilePtr f_ptr; - c_file_pool.getPtr(f_ptr, op->m_obj_ptr_i); - FilegroupPtr fg_ptr; + + jam(); + c_file_pool.getPtr(f_ptr, op->m_obj_ptr_i); ndbrequire(c_filegroup_hash.find(fg_ptr, f_ptr.p->m_filegroup_id)); req->senderData = op->key; @@ -16162,29 +16486,34 @@ Dbdict::send_drop_file(Signal* signal, SchemaOp* op, Uint32 ref= 0; switch(op->m_obj_type){ case DictTabInfo::Datafile: + { + jam(); ref = TSMAN_REF; break; + } case DictTabInfo::Undofile: + { + jam(); ref = LGMAN_REF; break; + } default: ndbrequire(false); } - sendSignal(ref, GSN_DROP_FILE_REQ, signal, DropFileImplReq::SignalLength, JBB); } void -Dbdict::execDROP_OBJ_REF(Signal* signal){ - jamEntry(); - +Dbdict::execDROP_OBJ_REF(Signal* signal) +{ DropObjRef * const ref = (DropObjRef*)signal->getDataPtr(); - Ptr<SchemaTransaction> trans_ptr; + + jamEntry(); ndbrequire(c_Trans.find(trans_ptr, ref->senderData)); - if(ref->errorCode != DropObjRef::NF_FakeErrorREF){ + jam(); trans_ptr.p->setErrorCode(ref->errorCode); } Uint32 node = refToNode(ref->senderRef); @@ -16192,65 +16521,61 @@ Dbdict::execDROP_OBJ_REF(Signal* signal){ } void -Dbdict::execDROP_OBJ_CONF(Signal* signal){ - jamEntry(); - +Dbdict::execDROP_OBJ_CONF(Signal* signal) +{ DropObjConf * const conf = (DropObjConf*)signal->getDataPtr(); - Ptr<SchemaTransaction> trans_ptr; + + jamEntry(); ndbrequire(c_Trans.find(trans_ptr, conf->senderData)); schemaOp_reply(signal, trans_ptr.p, refToNode(conf->senderRef)); } void -Dbdict::execDROP_FILE_REF(Signal* signal){ - jamEntry(); - +Dbdict::execDROP_FILE_REF(Signal* signal) +{ DropFileImplRef * ref = (DropFileImplRef*)signal->getDataPtr(); - DropObjRecordPtr op_ptr; + + jamEntry(); ndbrequire(c_opDropObj.find(op_ptr, ref->senderData)); op_ptr.p->m_errorCode = ref->errorCode; - execute(signal, op_ptr.p->m_callback, 0); } void -Dbdict::execDROP_FILE_CONF(Signal* signal){ - jamEntry(); - +Dbdict::execDROP_FILE_CONF(Signal* signal) +{ DropFileImplConf * rep = (DropFileImplConf*)signal->getDataPtr(); - DropObjRecordPtr op_ptr; + + jamEntry(); ndbrequire(c_opDropObj.find(op_ptr, rep->senderData)); - execute(signal, op_ptr.p->m_callback, 0); } void -Dbdict::execDROP_FILEGROUP_REF(Signal* signal){ - jamEntry(); - +Dbdict::execDROP_FILEGROUP_REF(Signal* signal) +{ DropFilegroupImplRef * ref = (DropFilegroupImplRef*)signal->getDataPtr(); - DropObjRecordPtr op_ptr; + + jamEntry(); ndbrequire(c_opDropObj.find(op_ptr, ref->senderData)); op_ptr.p->m_errorCode = ref->errorCode; - execute(signal, op_ptr.p->m_callback, 0); } void -Dbdict::execDROP_FILEGROUP_CONF(Signal* signal){ - jamEntry(); - +Dbdict::execDROP_FILEGROUP_CONF(Signal* signal) +{ DropFilegroupImplConf * rep = (DropFilegroupImplConf*)signal->getDataPtr(); - DropObjRecordPtr op_ptr; + + jamEntry(); ndbrequire(c_opDropObj.find(op_ptr, rep->senderData)); - execute(signal, op_ptr.p->m_callback, 0); } @@ -16263,11 +16588,13 @@ Dbdict::drop_fg_prepare_start(Signal* signal, SchemaOp* op) DictObject * obj = c_obj_pool.getPtr(fg_ptr.p->m_obj_ptr_i); if (obj->m_ref_count) { + jam(); op->m_errorCode = DropFilegroupRef::FilegroupInUse; execute(signal, op->m_callback, 0); } else { + jam(); send_drop_fg(signal, op, DropFilegroupImplReq::Prepare); } } @@ -16279,7 +16606,7 @@ Dbdict::drop_fg_commit_start(Signal* signal, SchemaOp* op) c_filegroup_pool.getPtr(fg_ptr, op->m_obj_ptr_i); if (op->m_obj_type == DictTabInfo::LogfileGroup) { - + jam(); /** * Mark all undofiles as dropped */ @@ 
-16288,6 +16615,7 @@ Dbdict::drop_fg_commit_start(Signal* signal, SchemaOp* op) XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0]; for(list.first(filePtr); !filePtr.isNull(); list.next(filePtr)) { + jam(); Uint32 objId = filePtr.p->key; SchemaFile::TableEntry * tableEntry = getTableEntry(xsf, objId); tableEntry->m_tableState = SchemaFile::DROP_TABLE_COMMITTED; @@ -16300,13 +16628,14 @@ Dbdict::drop_fg_commit_start(Signal* signal, SchemaOp* op) else if(op->m_obj_type == DictTabInfo::Tablespace) { FilegroupPtr lg_ptr; + jam(); ndbrequire(c_filegroup_hash. find(lg_ptr, fg_ptr.p->m_tablespace.m_default_logfile_group_id)); decrease_ref_count(lg_ptr.p->m_obj_ptr_i); } - + jam(); send_drop_fg(signal, op, DropFilegroupImplReq::Commit); } @@ -16315,16 +16644,17 @@ Dbdict::drop_fg_commit_complete(Signal* signal, SchemaOp* op) { FilegroupPtr fg_ptr; c_filegroup_pool.getPtr(fg_ptr, op->m_obj_ptr_i); - + + jam(); release_object(fg_ptr.p->m_obj_ptr_i); c_filegroup_hash.release(fg_ptr); - execute(signal, op->m_callback, 0); } void Dbdict::drop_fg_abort_start(Signal* signal, SchemaOp* op) { + jam(); send_drop_fg(signal, op, DropFilegroupImplReq::Abort); } diff --git a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp index 9801599fa08..3fff330d699 100644 --- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp +++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp @@ -376,6 +376,11 @@ public: Uint32 fragmentCount; Uint32 m_tablespace_id; + + /* + * Access rights to table during single user mode + */ + Uint8 singleUserMode; }; typedef Ptr<TableRecord> TableRecordPtr; @@ -1132,6 +1137,7 @@ private: * seize/release invokes ctor/dtor automatically. */ struct OpRecordCommon { + OpRecordCommon() {} Uint32 key; // key shared between master and slaves Uint32 nextHash; Uint32 prevHash; @@ -1147,6 +1153,7 @@ private: * Create table record */ struct CreateTableRecord : OpRecordCommon { + CreateTableRecord() {} Uint32 m_senderRef; Uint32 m_senderData; Uint32 m_coordinatorRef; @@ -1190,6 +1197,7 @@ private: * Drop table record */ struct DropTableRecord : OpRecordCommon { + DropTableRecord() {} DropTableReq m_request; Uint32 m_requestType; @@ -1973,6 +1981,7 @@ public: NodeBitmask m_nodes; Uint32 m_errorCode; + SchemaTransaction() {} void setErrorCode(Uint32 c){ if(m_errorCode == 0) m_errorCode = c;} /** @@ -2552,8 +2561,16 @@ private: void restartCreateTab_dihComplete(Signal* signal, Uint32 callback, Uint32); void restartCreateTab_activateComplete(Signal*, Uint32 callback, Uint32); - void restartDropTab(Signal* signal, Uint32 tableId); + void restartDropTab(Signal* signal, Uint32 tableId, + const SchemaFile::TableEntry *, + const SchemaFile::TableEntry *); void restartDropTab_complete(Signal*, Uint32 callback, Uint32); + + void restartDropObj(Signal*, Uint32, const SchemaFile::TableEntry *); + void restartDropObj_prepare_start_done(Signal*, Uint32, Uint32); + void restartDropObj_prepare_complete_done(Signal*, Uint32, Uint32); + void restartDropObj_commit_start_done(Signal*, Uint32, Uint32); + void restartDropObj_commit_complete_done(Signal*, Uint32, Uint32); void restart_checkSchemaStatusComplete(Signal*, Uint32 callback, Uint32); void restart_writeSchemaConf(Signal*, Uint32 callbackData, Uint32); @@ -2646,7 +2663,8 @@ public: void send_drop_fg(Signal*, SchemaOp*, DropFilegroupImplReq::RequestInfo); void drop_undofile_prepare_start(Signal* signal, SchemaOp*); - + void drop_undofile_commit_complete(Signal* signal, SchemaOp*); + int checkSingleUserMode(Uint32 
senderRef); }; diff --git a/storage/ndb/src/kernel/blocks/dbdict/printSchemaFile.cpp b/storage/ndb/src/kernel/blocks/dbdict/printSchemaFile.cpp index ca9b733f4d2..44326e213d0 100644 --- a/storage/ndb/src/kernel/blocks/dbdict/printSchemaFile.cpp +++ b/storage/ndb/src/kernel/blocks/dbdict/printSchemaFile.cpp @@ -15,7 +15,7 @@ #include <ndb_global.h> -#include <ndb_version.h> +#include <util/version.h> #include <NdbMain.h> #include <NdbOut.hpp> @@ -40,6 +40,8 @@ usage() << "Example: " << progname << " -ceq ndb_*_fs/D[12]/DBDICT/P0.SchemaLog" << endl; } +#ifdef NOT_USED + static void fill(const char * buf, int mod) { @@ -50,6 +52,7 @@ fill(const char * buf, int mod) len++; } } +#endif static const char* version(Uint32 v) diff --git a/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp b/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp index 317983323cc..1177500bc27 100644 --- a/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp +++ b/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp @@ -700,7 +700,6 @@ private: void execFSREADREF(Signal *); void execFSWRITECONF(Signal *); void execFSWRITEREF(Signal *); - void execSET_VAR_REQ(Signal *); void execCHECKNODEGROUPSREQ(Signal *); void execSTART_INFOREQ(Signal*); void execSTART_INFOREF(Signal*); @@ -1302,6 +1301,7 @@ public: private: struct LcpState { + LcpState() {} LcpStatus lcpStatus; Uint32 lcpStatusUpdatedPlace; @@ -1409,6 +1409,7 @@ public: private: class MasterTakeOverState { public: + MasterTakeOverState() {} void set(LcpMasterTakeOverState s, Uint32 line) { state = s; updatePlace = line; } @@ -1496,6 +1497,7 @@ private: * SwitchReplicaRecord - Should only be used by master */ struct SwitchReplicaRecord { + SwitchReplicaRecord() {} void clear(){} Uint32 nodeId; diff --git a/storage/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp b/storage/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp index 58fe8cc2326..aff31d625f4 100644 --- a/storage/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp +++ b/storage/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp @@ -204,7 +204,6 @@ Dbdih::Dbdih(Block_context& ctx): addRecSignal(GSN_FSREADREF, &Dbdih::execFSREADREF, true); addRecSignal(GSN_FSWRITECONF, &Dbdih::execFSWRITECONF); addRecSignal(GSN_FSWRITEREF, &Dbdih::execFSWRITEREF, true); - addRecSignal(GSN_SET_VAR_REQ, &Dbdih::execSET_VAR_REQ); addRecSignal(GSN_START_INFOREQ, &Dbdih::execSTART_INFOREQ); diff --git a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp index 217c6f38ade..dc35e6fba41 100644 --- a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp +++ b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp @@ -747,7 +747,8 @@ done: } ndbrequire(ok); - + CRASH_INSERTION(7183); + if (ERROR_INSERTED(7185) && reason==CopyGCIReq::GLOBAL_CHECKPOINT) { jam(); @@ -1207,11 +1208,69 @@ void Dbdih::execTAB_COMMITREQ(Signal* signal) void Dbdih::execDIH_RESTARTREQ(Signal* signal) { jamEntry(); - cntrlblockref = signal->theData[0]; - if(m_ctx.m_config.getInitialStart()){ - sendSignal(cntrlblockref, GSN_DIH_RESTARTREF, signal, 1, JBB); - } else { - readGciFileLab(signal); + if (signal->theData[0]) + { + jam(); + cntrlblockref = signal->theData[0]; + if(m_ctx.m_config.getInitialStart()){ + sendSignal(cntrlblockref, GSN_DIH_RESTARTREF, signal, 1, JBB); + } else { + readGciFileLab(signal); + } + } + else + { + /** + * Precondition (not checked): + * at least 1 node in each node group + */ + Uint32 i; + NdbNodeBitmask mask; + mask.assign(NdbNodeBitmask::Size, signal->theData + 1); + Uint32 *node_gcis = signal->theData+1+NdbNodeBitmask::Size; + Uint32 
node_group_gcis[MAX_NDB_NODES+1]; + bzero(node_group_gcis, sizeof(node_group_gcis)); + for (i = 0; i<MAX_NDB_NODES; i++) + { + if (mask.get(i)) + { + jam(); + Uint32 ng = Sysfile::getNodeGroup(i, SYSFILE->nodeGroups); + ndbrequire(ng < MAX_NDB_NODES); + Uint32 gci = node_gcis[i]; + if (gci < SYSFILE->lastCompletedGCI[i]) + { + jam(); + /** + * Handle the case where *I* know that the node completed the GCI + * but the node itself does not (bug#29167), + * i.e. the node died before it wrote its own sysfile + */ + gci = SYSFILE->lastCompletedGCI[i]; + } + + if (gci > node_group_gcis[ng]) + { + jam(); + node_group_gcis[ng] = gci; + } + } + } + for (i = 0; i<MAX_NDB_NODES && node_group_gcis[i] == 0; i++); + + Uint32 gci = node_group_gcis[i]; + for (i++ ; i<MAX_NDB_NODES; i++) + { + jam(); + if (node_group_gcis[i] && node_group_gcis[i] != gci) + { + jam(); + signal->theData[0] = i; + return; + } + } + signal->theData[0] = MAX_NDB_NODES; + return; } return; }//Dbdih::execDIH_RESTARTREQ() @@ -1543,10 +1602,26 @@ void Dbdih::ndbStartReqLab(Signal* signal, BlockReference ref) */ SYSFILE->lastCompletedGCI[nodePtr.i] = 0; ndbrequire(nodePtr.p->nodeStatus != NodeRecord::ALIVE); - warningEvent("Making filesystem for node %d unusable", + warningEvent("Making filesystem for node %d unusable (need --initial)", nodePtr.i); } + else if (nodePtr.p->nodeStatus == NodeRecord::ALIVE && + SYSFILE->lastCompletedGCI[nodePtr.i] == 0) + { + jam(); + CRASH_INSERTION(7170); + char buf[255]; + BaseString::snprintf(buf, sizeof(buf), + "Cluster requires this node to be started " + "with --initial as a partial start has been performed" + " and this filesystem is unusable"); + progError(__LINE__, + NDBD_EXIT_SR_RESTARTCONFLICT, + buf); + ndbrequire(false); + } } + /** * This set which GCI we will try to restart to */ @@ -2043,8 +2118,11 @@ void Dbdih::gcpBlockedLab(Signal* signal) /*-------------------------------------------------------------------------*/ Uint32 startVersion = getNodeInfo(c_nodeStartMaster.startNode).m_version; - if ((getMajor(startVersion) == 4 && startVersion >= NDBD_INCL_NODECONF_VERSION_4) || - (getMajor(startVersion) == 5 && startVersion >= NDBD_INCL_NODECONF_VERSION_5)) + if ((getMajor(startVersion) == 4 && + startVersion >= NDBD_INCL_NODECONF_VERSION_4) || + (getMajor(startVersion) == 5 && + startVersion >= NDBD_INCL_NODECONF_VERSION_5) || + (getMajor(startVersion) > 5)) { c_INCL_NODEREQ_Counter.setWaitingFor(c_nodeStartMaster.startNode); } @@ -2283,8 +2361,11 @@ void Dbdih::execINCL_NODEREQ(Signal* signal) CRASH_INSERTION(7171); Uint32 masterVersion = getNodeInfo(refToNode(cmasterdihref)).m_version; - if ((NDB_VERSION_MAJOR == 4 && masterVersion >= NDBD_INCL_NODECONF_VERSION_4) || - (NDB_VERSION_MAJOR == 5 && masterVersion >= NDBD_INCL_NODECONF_VERSION_5)) + if ((NDB_VERSION_MAJOR == 4 && + masterVersion >= NDBD_INCL_NODECONF_VERSION_4) || + (NDB_VERSION_MAJOR == 5 && + masterVersion >= NDBD_INCL_NODECONF_VERSION_5) || + (NDB_VERSION_MAJOR > 5)) { signal->theData[0] = getOwnNodeId(); signal->theData[1] = getOwnNodeId(); @@ -2906,7 +2987,6 @@ Dbdih::nr_start_fragment(Signal* signal, if (replicaPtr.p->lcpStatus[idx] == ZVALID) { ndbrequire(replicaPtr.p->lcpId[idx] > maxLcpId); - Uint32 startGci = replicaPtr.p->maxGciCompleted[idx]; Uint32 stopGci = replicaPtr.p->maxGciStarted[idx]; for (;j < replicaPtr.p->noCrashedReplicas; j++) { @@ -10636,8 +10716,6 @@ Dbdih::handle_invalid_lcp_no(const LcpFragRep* rep, ndbrequire(!isMaster()); Uint32 lcpNo = rep->lcpNo; Uint32 lcpId = rep->lcpId; - Uint32 replicaLcpNo = replicaPtr.p->nextLcp; - 
Uint32 prevReplicaLcpNo = prevLcpNo(replicaLcpNo); warningEvent("Detected previous node failure of %d during lcp", rep->nodeId); @@ -12612,7 +12690,7 @@ void Dbdih::makeNodeGroups(Uint32 nodeArray[]) (buf, sizeof(buf), "Illegal initial start, no alive node in nodegroup %u", i); progError(__LINE__, - NDBD_EXIT_SR_RESTARTCONFLICT, + NDBD_EXIT_INSUFFICENT_NODES, buf); } @@ -12752,14 +12830,23 @@ void Dbdih::newCrashedReplica(Uint32 nodeId, ReplicaRecordPtr ncrReplicaPtr) /* THAT THE NEW REPLICA IS NOT STARTED YET AND REPLICA_LAST_GCI IS*/ /* SET TO -1 TO INDICATE THAT IT IS NOT DEAD YET. */ /*----------------------------------------------------------------------*/ + Uint32 lastGCI = SYSFILE->lastCompletedGCI[nodeId]; arrGuardErr(ncrReplicaPtr.p->noCrashedReplicas + 1, 8, NDBD_EXIT_MAX_CRASHED_REPLICAS); ncrReplicaPtr.p->replicaLastGci[ncrReplicaPtr.p->noCrashedReplicas] = - SYSFILE->lastCompletedGCI[nodeId]; + lastGCI; ncrReplicaPtr.p->noCrashedReplicas = ncrReplicaPtr.p->noCrashedReplicas + 1; ncrReplicaPtr.p->createGci[ncrReplicaPtr.p->noCrashedReplicas] = 0; ncrReplicaPtr.p->replicaLastGci[ncrReplicaPtr.p->noCrashedReplicas] = (Uint32)-1; + + if (ncrReplicaPtr.p->noCrashedReplicas == 7 && lastGCI) + { + jam(); + SYSFILE->lastCompletedGCI[nodeId] = 0; + warningEvent("Making filesystem for node %d unusable (need --initial)", + nodeId); + } }//Dbdih::newCrashedReplica() /*************************************************************************/ @@ -14746,30 +14833,6 @@ Dbdih::execNDB_TAMPER(Signal* signal) return; }//Dbdih::execNDB_TAMPER() -void Dbdih::execSET_VAR_REQ(Signal* signal) { -#if 0 - SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0]; - ConfigParamId var = setVarReq->variable(); - int val = setVarReq->value(); - - - switch (var) { - case TimeBetweenLocalCheckpoints: - c_lcpState.clcpDelay = val; - sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB); - break; - - case TimeBetweenGlobalCheckpoints: - cgcpDelay = val; - sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB); - break; - - default: - sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB); - } // switch -#endif -} - void Dbdih::execBLOCK_COMMIT_ORD(Signal* signal){ BlockCommitOrd* const block = (BlockCommitOrd *)&signal->theData[0]; diff --git a/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp b/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp index b801e14104a..1bed25fb5a8 100644 --- a/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp +++ b/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp @@ -489,6 +489,7 @@ public: typedef Ptr<Databuf> DatabufPtr; struct ScanRecord { + ScanRecord() {} enum ScanState { SCAN_FREE = 0, WAIT_STORED_PROC_COPY = 1, @@ -2024,7 +2025,6 @@ public: Uint8 reclenAiLqhkey; Uint8 m_offset_current_keybuf; Uint8 replicaType; - Uint8 simpleRead; Uint8 seqNoReplica; Uint8 tcNodeFailrec; Uint8 m_disk_table; @@ -2166,7 +2166,6 @@ private: void execFSREADCONF(Signal* signal); void execFSREADREF(Signal* signal); void execSCAN_HBREP(Signal* signal); - void execSET_VAR_REQ(Signal* signal); void execTIME_SIGNAL(Signal* signal); void execFSSYNCCONF(Signal* signal); @@ -2901,6 +2900,7 @@ public: * */ struct CommitAckMarker { + CommitAckMarker() {} Uint32 transid1; Uint32 transid2; @@ -2927,6 +2927,7 @@ public: void scanMarkers(Signal* signal, Uint32 tcNodeFail, Uint32 bucket, Uint32 i); struct Counters { + Counters() {} Uint32 operations; inline void clear(){ diff --git a/storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp b/storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp index d4d026985fd..d6411ee1cb9 100644 
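The non-initial branch added to execDIH_RESTARTREQ above reduces the per-node completed GCIs to one value per node group and reports the first group that disagrees. A minimal standalone sketch of that reduction, with plain arrays and invented names (check_node_group_gcis, MAX_NODES) standing in for the Sysfile and NdbNodeBitmask machinery:

#include <cstdint>
#include <cstring>

static const unsigned MAX_NODES = 49;   // stand-in for MAX_NDB_NODES

// Returns MAX_NODES when every populated node group agrees on the same
// completed GCI, otherwise the id of the first group that disagrees.
unsigned check_node_group_gcis(const bool alive[MAX_NODES],
                               const unsigned node_group[MAX_NODES],
                               const uint32_t node_gci[MAX_NODES])
{
  uint32_t ng_gci[MAX_NODES + 1];
  std::memset(ng_gci, 0, sizeof(ng_gci));

  // Keep the highest completed GCI reported within each node group.
  for (unsigned n = 0; n < MAX_NODES; n++)
    if (alive[n] && node_gci[n] > ng_gci[node_group[n]])
      ng_gci[node_group[n]] = node_gci[n];

  // Skip empty groups, then require all remaining groups to match.
  unsigned i = 0;
  while (i < MAX_NODES && ng_gci[i] == 0)
    i++;
  const uint32_t gci = ng_gci[i];
  for (i++; i < MAX_NODES; i++)
    if (ng_gci[i] != 0 && ng_gci[i] != gci)
      return i;        // restart cannot span this GCI mismatch
  return MAX_NODES;    // all node groups agree
}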
--- a/storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp +++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp @@ -274,7 +274,6 @@ Dblqh::Dblqh(Block_context& ctx): addRecSignal(GSN_FSREADCONF, &Dblqh::execFSREADCONF); addRecSignal(GSN_FSREADREF, &Dblqh::execFSREADREF, true); addRecSignal(GSN_ACC_ABORTCONF, &Dblqh::execACC_ABORTCONF); - addRecSignal(GSN_SET_VAR_REQ, &Dblqh::execSET_VAR_REQ); addRecSignal(GSN_TIME_SIGNAL, &Dblqh::execTIME_SIGNAL); addRecSignal(GSN_FSSYNCCONF, &Dblqh::execFSSYNCCONF); addRecSignal(GSN_REMOVE_MARKER_ORD, &Dblqh::execREMOVE_MARKER_ORD); diff --git a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp index 6f0d676194f..f511e00afaa 100644 --- a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp +++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp @@ -588,6 +588,26 @@ Dblqh::execDEFINE_BACKUP_REF(Signal* signal) { jamEntry(); m_backup_ptr = RNIL; + DefineBackupRef* ref = (DefineBackupRef*)signal->getDataPtrSend(); + int err_code = 0; + char * extra_msg = NULL; + + switch(ref->errorCode){ + case DefineBackupRef::Undefined: + case DefineBackupRef::FailedToSetupFsBuffers: + case DefineBackupRef::FailedToAllocateBuffers: + case DefineBackupRef::FailedToAllocateTables: + case DefineBackupRef::FailedAllocateTableMem: + case DefineBackupRef::FailedToAllocateFileRecord: + case DefineBackupRef::FailedToAllocateAttributeRecord: + case DefineBackupRef::FailedInsertFileHeader: + case DefineBackupRef::FailedInsertTableList: + jam(); + err_code = NDBD_EXIT_INVALID_CONFIG; + extra_msg = (char*) "Probably a Backup parameters configuration error. Please consult the manual"; + progError(__LINE__, err_code, extra_msg); + } + sendsttorryLab(signal); } @@ -2524,8 +2544,16 @@ Dblqh::execREMOVE_MARKER_ORD(Signal* signal) CommitAckMarkerPtr removedPtr; m_commitAckMarkerHash.remove(removedPtr, key); +#if defined VM_TRACE || defined ERROR_INSERT ndbrequire(removedPtr.i != RNIL); m_commitAckMarkerPool.release(removedPtr); +#else + if (removedPtr.i != RNIL) + { + jam(); + m_commitAckMarkerPool.release(removedPtr); + } +#endif #ifdef MARKER_TRACE ndbout_c("Rem marker[%.8x %.8x]", key.transid1, key.transid2); #endif @@ -3183,20 +3211,23 @@ void Dblqh::lqhAttrinfoLab(Signal* signal, Uint32* dataPtr, Uint32 length) { TcConnectionrec * const regTcPtr = tcConnectptr.p; if (regTcPtr->operation != ZREAD) { - if (regTcPtr->opExec != 1) { - if (saveTupattrbuf(signal, dataPtr, length) == ZOK) { - ; - } else { - jam(); + if (regTcPtr->operation != ZDELETE) + { + if (regTcPtr->opExec != 1) { + if (saveTupattrbuf(signal, dataPtr, length) == ZOK) { + ; + } else { + jam(); /* ------------------------------------------------------------------------- */ /* WE MIGHT BE WAITING FOR RESPONSE FROM SOME BLOCK HERE. THUS WE NEED TO */ /* GO THROUGH THE STATE MACHINE FOR THE OPERATION. 
*/ /* ------------------------------------------------------------------------- */ - localAbortStateHandlerLab(signal); - return; + localAbortStateHandlerLab(signal); + return; + }//if }//if }//if - }//if + } c_tup->receive_attrinfo(signal, regTcPtr->tupConnectrec, dataPtr, length); }//Dblqh::lqhAttrinfoLab() @@ -3450,7 +3481,7 @@ void Dblqh::execLQHKEYREQ(Signal* signal) markerPtr.p->tcNodeId = tcNodeId; CommitAckMarkerPtr tmp; -#ifdef VM_TRACE +#if defined VM_TRACE || defined ERROR_INSERT #ifdef MARKER_TRACE ndbout_c("Add marker[%.8x %.8x]", markerPtr.p->transid1, markerPtr.p->transid2); #endif @@ -3465,7 +3496,6 @@ void Dblqh::execLQHKEYREQ(Signal* signal) regTcPtr->dirtyOp = LqhKeyReq::getDirtyFlag(Treqinfo); regTcPtr->opExec = LqhKeyReq::getInterpretedFlag(Treqinfo); regTcPtr->opSimple = LqhKeyReq::getSimpleFlag(Treqinfo); - regTcPtr->simpleRead = op == ZREAD && regTcPtr->opSimple; regTcPtr->seqNoReplica = LqhKeyReq::getSeqNoReplica(Treqinfo); UintR TreclenAiLqhkey = LqhKeyReq::getAIInLqhKeyReq(Treqinfo); regTcPtr->apiVersionNo = 0; @@ -3482,9 +3512,15 @@ void Dblqh::execLQHKEYREQ(Signal* signal) regTcPtr->lockType = op == ZREAD_EX ? ZUPDATE : (Operation_t) op == ZWRITE ? ZINSERT : (Operation_t) op; } + + if (regTcPtr->dirtyOp) + { + ndbrequire(regTcPtr->opSimple); + } - CRASH_INSERTION2(5041, regTcPtr->simpleRead && - refToNode(signal->senderBlockRef()) != cownNodeid); + CRASH_INSERTION2(5041, (op == ZREAD && + (regTcPtr->opSimple || regTcPtr->dirtyOp) && + refToNode(signal->senderBlockRef()) != cownNodeid)); regTcPtr->reclenAiLqhkey = TreclenAiLqhkey; regTcPtr->currReclenAi = TreclenAiLqhkey; @@ -3656,8 +3692,8 @@ void Dblqh::execLQHKEYREQ(Signal* signal) Uint8 TdistKey = LqhKeyReq::getDistributionKey(TtotReclenAi); if ((tfragDistKey != TdistKey) && (regTcPtr->seqNoReplica == 0) && - (regTcPtr->dirtyOp == ZFALSE) && - (regTcPtr->simpleRead == ZFALSE)) { + (regTcPtr->dirtyOp == ZFALSE)) + { /* ---------------------------------------------------------------------- * WE HAVE DIFFERENT OPINION THAN THE DIH THAT STARTED THE TRANSACTION. * THE REASON COULD BE THAT THIS IS AN OLD DISTRIBUTION WHICH IS NO LONGER @@ -3976,7 +4012,6 @@ void Dblqh::handle_nr_copy(Signal* signal, Ptr<TcConnectionrec> regTcPtr) { jam(); - Uint32 tableId = regTcPtr.p->tableref; Uint32 fragPtr = fragptr.p->tupFragptr; Uint32 op = regTcPtr.p->operation; @@ -4270,7 +4305,7 @@ Dblqh::nr_copy_delete_row(Signal* signal, signal->theData, sizeof(Local_key)); regTcPtr.p->m_nr_delete.m_page_id[pos] = RNIL; regTcPtr.p->m_nr_delete.m_cnt = pos + 2; - ndbout << "PENDING DISK DELETE: " << + if (0) ndbout << "PENDING DISK DELETE: " << regTcPtr.p->m_nr_delete.m_disk_ref[pos] << endl; } @@ -4748,7 +4783,18 @@ void Dblqh::tupkeyConfLab(Signal* signal) TRACE_OP(regTcPtr, "TUPKEYCONF"); - if (regTcPtr->simpleRead) { + if (readLen != 0) + { + jam(); + + /* SET BIT 15 IN REQINFO */ + LqhKeyReq::setApplicationAddressFlag(regTcPtr->reqinfo, 1); + regTcPtr->readlenAi = readLen; + }//if + + if (regTcPtr->operation == ZREAD && + (regTcPtr->opSimple || regTcPtr->dirtyOp)) + { jam(); /* ---------------------------------------------------------------------- * THE OPERATION IS A SIMPLE READ. 
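The Dblqh hunks above stop caching a simpleRead byte in TcConnectionrec and instead recompute the property from the operation type and the simple/dirty flags wherever it is needed. A self-contained sketch of that predicate; the ZREAD value and field names are assumptions for illustration only:

typedef unsigned Uint32;
enum { ZREAD = 0 };   // value assumed for illustration only

struct OpFlags {
  Uint32 operation;   // ZREAD, ZUPDATE, ...
  Uint32 opSimple;    // simple read: lock dropped once the read completes
  Uint32 dirtyOp;     // dirty read: read committed, no lock held
};

// A committed read (simple or dirty) can go straight to the commit and
// cleanup path after TUPKEYCONF instead of parking in the PREPARED state
// the way a locking operation does.
static inline bool committed_read(const OpFlags& f)
{
  return f.operation == ZREAD && (f.opSimple || f.dirtyOp);
}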
@@ -4762,14 +4808,6 @@ void Dblqh::tupkeyConfLab(Signal* signal) commitContinueAfterBlockedLab(signal); return; }//if - if (readLen != 0) - { - jam(); - - /* SET BIT 15 IN REQINFO */ - LqhKeyReq::setApplicationAddressFlag(regTcPtr->reqinfo, 1); - regTcPtr->readlenAi = readLen; - }//if regTcPtr->totSendlenAi = writeLen; ndbrequire(regTcPtr->totSendlenAi == regTcPtr->currTupAiLen); @@ -5148,12 +5186,15 @@ void Dblqh::packLqhkeyreqLab(Signal* signal) /* */ /* ------------------------------------------------------------------------- */ sendLqhkeyconfTc(signal, regTcPtr->tcBlockref); - if (regTcPtr->dirtyOp != ZTRUE) { + if (! (regTcPtr->dirtyOp || + (regTcPtr->operation == ZREAD && regTcPtr->opSimple))) + { jam(); regTcPtr->transactionState = TcConnectionrec::PREPARED; releaseOprec(signal); } else { jam(); + /*************************************************************>*/ /* DIRTY WRITES ARE USED IN TWO SITUATIONS. THE FIRST */ /* SITUATION IS WHEN THEY ARE USED TO UPDATE COUNTERS AND*/ @@ -6376,8 +6417,8 @@ void Dblqh::commitContinueAfterBlockedLab(Signal* signal) Ptr<TcConnectionrec> regTcPtr = tcConnectptr; Ptr<Fragrecord> regFragptr = fragptr; Uint32 operation = regTcPtr.p->operation; - Uint32 simpleRead = regTcPtr.p->simpleRead; Uint32 dirtyOp = regTcPtr.p->dirtyOp; + Uint32 opSimple = regTcPtr.p->opSimple; if (regTcPtr.p->activeCreat != Fragrecord::AC_IGNORED) { if (operation != ZREAD) { TupCommitReq * const tupCommitReq = @@ -6435,20 +6476,29 @@ void Dblqh::commitContinueAfterBlockedLab(Signal* signal) EXECUTE_DIRECT(acc, GSN_ACC_COMMITREQ, signal, 1); } - if (simpleRead) { + if (dirtyOp) + { jam(); -/* ------------------------------------------------------------------------- */ -/*THE OPERATION WAS A SIMPLE READ THUS THE COMMIT PHASE IS ONLY NEEDED TO */ -/*RELEASE THE LOCKS. AT THIS POINT IN THE CODE THE LOCKS ARE RELEASED AND WE */ -/*ARE IN A POSITION TO SEND LQHKEYCONF TO TC. WE WILL ALSO RELEASE ALL */ -/*RESOURCES BELONGING TO THIS OPERATION SINCE NO MORE WORK WILL BE */ -/*PERFORMED. 
*/ -/* ------------------------------------------------------------------------- */ + /** + * The dirtyRead does not send anything but TRANSID_AI from LDM + */ fragptr = regFragptr; tcConnectptr = regTcPtr; cleanUp(signal); return; - }//if + } + + /** + * The simpleRead will send a LQHKEYCONF + * but has already released the locks + */ + if (opSimple) + { + fragptr = regFragptr; + tcConnectptr = regTcPtr; + packLqhkeyreqLab(signal); + return; + } } }//if jamEntry(); @@ -6742,7 +6792,6 @@ void Dblqh::execABORT(Signal* signal) }//if TcConnectionrec * const regTcPtr = tcConnectptr.p; - Uint32 activeCreat = regTcPtr->activeCreat; if (ERROR_INSERTED(5100)) { SET_ERROR_INSERT_VALUE(5101); @@ -6822,7 +6871,6 @@ void Dblqh::execABORTREQ(Signal* signal) return; }//if TcConnectionrec * const regTcPtr = tcConnectptr.p; - Uint32 activeCreat = regTcPtr->activeCreat; if (regTcPtr->transactionState != TcConnectionrec::PREPARED) { warningReport(signal, 10); return; }//if @@ -7060,7 +7108,7 @@ void Dblqh::abortStateHandlerLab(Signal* signal) /* ------------------------------------------------------------------------- */ return; }//if - if (regTcPtr->simpleRead) { + if (regTcPtr->opSimple) { jam(); /* ------------------------------------------------------------------------- */ /*A SIMPLE READ IS CURRENTLY RELEASING THE LOCKS OR WAITING FOR ACCESS TO */ @@ -7243,7 +7291,7 @@ void Dblqh::execACC_ABORTCONF(Signal* signal) TRACE_OP(regTcPtr, "ACC_ABORTCONF"); signal->theData[0] = regTcPtr->tupConnectrec; EXECUTE_DIRECT(DBTUP, GSN_TUP_ABORTREQ, signal, 1); - + + jamEntry(); continueAbortLab(signal); return; }//Dblqh::execACC_ABORTCONF() @@ -7327,7 +7376,8 @@ void Dblqh::continueAbortLab(Signal* signal) void Dblqh::continueAfterLogAbortWriteLab(Signal* signal) { TcConnectionrec * const regTcPtr = tcConnectptr.p; - if (regTcPtr->simpleRead) { + if (regTcPtr->operation == ZREAD && regTcPtr->dirtyOp) + { jam(); TcKeyRef * const tcKeyRef = (TcKeyRef *) signal->getDataPtrSend(); @@ -7586,7 +7636,7 @@ void Dblqh::lqhTransNextLab(Signal* signal) * THE RECEIVER OF THE COPY HAVE FAILED. * WE HAVE TO CLOSE THE COPY PROCESS. 
* ----------------------------------------------------------- */ - ndbout_c("close copy"); + if (0) ndbout_c("close copy"); tcConnectptr.p->tcNodeFailrec = tcNodeFailptr.i; tcConnectptr.p->abortState = TcConnectionrec::NEW_FROM_TC; closeCopyRequestLab(signal); @@ -8371,7 +8421,6 @@ void Dblqh::execSCAN_FRAGREQ(Signal* signal) const Uint32 scanLockMode = ScanFragReq::getLockMode(reqinfo); const Uint8 keyinfo = ScanFragReq::getKeyinfoFlag(reqinfo); const Uint8 rangeScan = ScanFragReq::getRangeScanFlag(reqinfo); - const Uint8 tupScan = ScanFragReq::getTupScanFlag(reqinfo); ptrCheckGuard(tabptr, ctabrecFileSize, tablerec); if(tabptr.p->tableStatus != Tablerec::TABLE_DEFINED){ @@ -9698,7 +9747,7 @@ Uint32 Dblqh::initScanrec(const ScanFragReq* scanFragReq) active.add(scanptr); if(scanptr.p->scanKeyinfoFlag){ jam(); -#ifdef VM_TRACE +#if defined VM_TRACE || defined ERROR_INSERT ScanRecordPtr tmp; ndbrequire(!c_scanTakeOverHash.find(tmp, * scanptr.p)); #endif @@ -9822,7 +9871,7 @@ void Dblqh::finishScanrec(Signal* signal) scans.add(restart); if(restart.p->scanKeyinfoFlag){ jam(); -#ifdef VM_TRACE +#if defined VM_TRACE || defined ERROR_INSERT ScanRecordPtr tmp; ndbrequire(!c_scanTakeOverHash.find(tmp, * restart.p)); #endif @@ -9883,9 +9932,11 @@ Uint32 Dblqh::sendKeyinfo20(Signal* signal, const Uint32 scanOp = scanP->m_curr_batch_size_rows; const Uint32 nodeId = refToNode(ref); const bool connectedToNode = getNodeInfo(nodeId).m_connected; - //const Uint32 type = getNodeInfo(nodeId).m_type; - //const bool is_api= (type >= NodeInfo::API && type <= NodeInfo::REP); - //const bool old_dest= (getNodeInfo(nodeId).m_version < MAKE_VERSION(3,5,0)); +#ifdef NOT_USED + const Uint32 type = getNodeInfo(nodeId).m_type; + const bool is_api= (type >= NodeInfo::API && type <= NodeInfo::REP); + const bool old_dest= (getNodeInfo(nodeId).m_version < MAKE_VERSION(3,5,0)); +#endif const bool longable = true; // TODO is_api && !old_dest; Uint32 * dst = keyInfo->keyData; @@ -9986,7 +10037,9 @@ void Dblqh::sendScanFragConf(Signal* signal, Uint32 scanCompleted) return; } ScanFragConf * conf = (ScanFragConf*)&signal->theData[0]; - //NodeId tc_node_id= refToNode(tcConnectptr.p->clientBlockref); +#ifdef NOT_USED + NodeId tc_node_id= refToNode(tcConnectptr.p->clientBlockref); +#endif Uint32 trans_id1= tcConnectptr.p->transid[0]; Uint32 trans_id2= tcConnectptr.p->transid[1]; @@ -10880,7 +10933,7 @@ void Dblqh::tupCopyCloseConfLab(Signal* signal) void Dblqh::closeCopyRequestLab(Signal* signal) { scanptr.p->scanErrorCounter++; - ndbout_c("closeCopyRequestLab: scanState: %d", scanptr.p->scanState); + if (0) ndbout_c("closeCopyRequestLab: scanState: %d", scanptr.p->scanState); switch (scanptr.p->scanState) { case ScanRecord::WAIT_TUPKEY_COPY: case ScanRecord::WAIT_NEXT_SCAN_COPY: @@ -11481,7 +11534,17 @@ void Dblqh::execLCP_PREPARE_CONF(Signal* signal) void Dblqh::execBACKUP_FRAGMENT_REF(Signal* signal) { - ndbrequire(false); + BackupFragmentRef *ref= (BackupFragmentRef*)signal->getDataPtr(); + char buf[100]; + BaseString::snprintf(buf,sizeof(buf), + "Unable to store fragment during LCP. NDBFS Error: %u", + ref->errorCode); + + progError(__LINE__, + (ref->errorCode & FsRef::FS_ERR_BIT)? + NDBD_EXIT_AFS_UNKNOWN + : ref->errorCode, + buf); } void Dblqh::execBACKUP_FRAGMENT_CONF(Signal* signal) @@ -14178,15 +14241,6 @@ void Dblqh::execSTART_RECREQ(Signal* signal) * WE ALSO NEED TO SET CNEWEST_GCI TO ENSURE THAT LOG RECORDS ARE EXECUTED * WITH A PROPER GCI. 
*------------------------------------------------------------------------ */ - if(cstartType == NodeState::ST_INITIAL_NODE_RESTART){ - jam(); - cstartRecReq = 2; - StartRecConf * conf = (StartRecConf*)signal->getDataPtrSend(); - conf->startingNodeId = getOwnNodeId(); - sendSignal(cmasterDihBlockref, GSN_START_RECCONF, signal, - StartRecConf::SignalLength, JBB); - return; - }//if if (c_lcp_restoring_fragments.isEmpty()) { @@ -14239,6 +14293,19 @@ void Dblqh::execSTART_RECCONF(Signal* signal) jam(); csrExecUndoLogState = EULS_COMPLETED; + + if(cstartType == NodeState::ST_INITIAL_NODE_RESTART) + { + jam(); + cstartRecReq = 2; + + StartRecConf * conf = (StartRecConf*)signal->getDataPtrSend(); + conf->startingNodeId = getOwnNodeId(); + sendSignal(cmasterDihBlockref, GSN_START_RECCONF, signal, + StartRecConf::SignalLength, JBB); + return; + } + c_lcp_complete_fragments.first(fragptr); build_acc(signal, fragptr.i); return; @@ -15362,8 +15429,6 @@ void Dblqh::execDEBUG_SIG(Signal* signal) 2.5 TEMPORARY VARIABLES ----------------------- */ - UintR tdebug; - jamEntry(); //logPagePtr.i = signal->theData[0]; //tdebug = logPagePtr.p->logPageWord[0]; @@ -18983,7 +19048,6 @@ Dblqh::execDUMP_STATE_ORD(Signal* signal) ndbout << " operation = " << tcRec.p->operation<<endl; ndbout << " tcNodeFailrec = " << tcRec.p->tcNodeFailrec << " seqNoReplica = " << tcRec.p->seqNoReplica - << " simpleRead = " << tcRec.p->simpleRead << endl; ndbout << " replicaType = " << tcRec.p->replicaType << " reclenAiLqhkey = " << tcRec.p->reclenAiLqhkey @@ -19137,30 +19201,6 @@ Dblqh::execDUMP_STATE_ORD(Signal* signal) }//Dblqh::execDUMP_STATE_ORD() -void Dblqh::execSET_VAR_REQ(Signal* signal) -{ -#if 0 - SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0]; - ConfigParamId var = setVarReq->variable(); - - switch (var) { - - case NoOfConcurrentCheckpointsAfterRestart: - sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB); - break; - - case NoOfConcurrentCheckpointsDuringRestart: - // Valid only during start so value not set. - sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB); - break; - - default: - sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB); - } // switch -#endif -}//execSET_VAR_REQ() - - /* **************************************************************** */ /* ---------------------------------------------------------------- */ /* ---------------------- TRIGGER HANDLING ------------------------ */ diff --git a/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp b/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp index 203febef3f6..3d5e52a525d 100644 --- a/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp +++ b/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp @@ -302,6 +302,7 @@ public: /* WHEN THE TRIGGER IS DEACTIVATED. 
*/ /* **************************************** */ struct TcDefinedTriggerData { + TcDefinedTriggerData() {} /** * Trigger id, used to identify the trigger */ @@ -709,6 +710,7 @@ public: Uint8 tckeyrec; // Changed from R Uint8 tcindxrec; Uint8 apiFailState; // Changed from R + Uint8 singleUserMode; ReturnSignal returnsignal; Uint8 timeOutCounter; @@ -784,6 +786,7 @@ public: UintR apiConnect; /* POINTER TO API CONNECT RECORD */ UintR nextTcConnect; /* NEXT TC RECORD*/ Uint8 dirtyOp; + Uint8 opSimple; Uint8 lastReplicaNo; /* NUMBER OF THE LAST REPLICA IN THE OPERATION */ Uint8 noOfNodes; /* TOTAL NUMBER OF NODES IN OPERATION */ Uint8 operation; /* OPERATION TYPE */ @@ -884,13 +887,8 @@ public: Uint8 opExec; Uint8 unused; + Uint8 unused1; - /** - * IS THE OPERATION A SIMPLE TRANSACTION - * 0 = NO, 1 = YES - */ - Uint8 opSimple; - //--------------------------------------------------- // Second 16 byte cache line in second 64 byte cache // line. Diverse use. @@ -958,11 +956,23 @@ public: /* ALL TABLES IN THE SYSTEM. */ /********************************************************/ struct TableRecord { + TableRecord() {} Uint32 currentSchemaVersion; - Uint8 enabled; - Uint8 dropping; + Uint16 m_flags; Uint8 tableType; - Uint8 storedTable; + Uint8 singleUserMode; + + enum { + TR_ENABLED = 1 << 0, + TR_DROPPING = 1 << 1, + TR_STORED_TABLE = 1 << 2 + }; + Uint8 get_enabled() const { return (m_flags & TR_ENABLED) != 0; } + Uint8 get_dropping() const { return (m_flags & TR_DROPPING) != 0; } + Uint8 get_storedTable() const { return (m_flags & TR_STORED_TABLE) != 0; } + void set_enabled(Uint8 f) { f ? m_flags |= (Uint16)TR_ENABLED : m_flags &= ~(Uint16)TR_ENABLED; } + void set_dropping(Uint8 f) { f ? m_flags |= (Uint16)TR_DROPPING : m_flags &= ~(Uint16)TR_DROPPING; } + void set_storedTable(Uint8 f) { f ? 
m_flags |= (Uint16)TR_STORED_TABLE : m_flags &= ~(Uint16)TR_STORED_TABLE; } Uint8 noOfKeyAttr; Uint8 hasCharAttr; @@ -970,7 +980,7 @@ public: Uint8 hasVarKeys; bool checkTable(Uint32 schemaVersion) const { - return enabled && !dropping && + return get_enabled() && !get_dropping() && (table_version_major(schemaVersion) == table_version_major(currentSchemaVersion)); } @@ -1325,7 +1335,6 @@ private: void execTIME_SIGNAL(Signal* signal); void execAPI_FAILREQ(Signal* signal); void execSCAN_HBREP(Signal* signal); - void execSET_VAR_REQ(Signal* signal); void execABORT_ALL_REQ(Signal* signal); @@ -1451,7 +1460,7 @@ private: void releaseAttrinfo(); void releaseGcp(Signal* signal); void releaseKeys(); - void releaseSimpleRead(Signal*, ApiConnectRecordPtr, TcConnectRecord*); + void releaseDirtyRead(Signal*, ApiConnectRecordPtr, TcConnectRecord*); void releaseDirtyWrite(Signal* signal); void releaseTcCon(); void releaseTcConnectFail(Signal* signal); @@ -1486,12 +1495,12 @@ private: void clearCommitAckMarker(ApiConnectRecord * const regApiPtr, TcConnectRecord * const regTcPtr); // Trigger and index handling - bool saveINDXKEYINFO(Signal* signal, - TcIndexOperation* indexOp, - const Uint32 *src, - Uint32 len); + int saveINDXKEYINFO(Signal* signal, + TcIndexOperation* indexOp, + const Uint32 *src, + Uint32 len); bool receivedAllINDXKEYINFO(TcIndexOperation* indexOp); - bool saveINDXATTRINFO(Signal* signal, + int saveINDXATTRINFO(Signal* signal, TcIndexOperation* indexOp, const Uint32 *src, Uint32 len); @@ -1607,7 +1616,7 @@ private: void startphase1x010Lab(Signal* signal); void lqhKeyConf_checkTransactionState(Signal * signal, - ApiConnectRecord * const regApiPtr); + Ptr<ApiConnectRecord> regApiPtr); void checkDropTab(Signal* signal); @@ -1667,6 +1676,7 @@ private: UintR tcheckGcpId; struct TransCounters { + TransCounters() {} enum { Off, Timer, Started } c_trans_status; UintR cattrinfoCount; UintR ctransCount; @@ -1805,6 +1815,7 @@ private: */ public: struct CommitAckMarker { + CommitAckMarker() {} Uint32 transid1; Uint32 transid2; union { Uint32 nextPool; Uint32 nextHash; }; @@ -1837,9 +1848,14 @@ private: Uint32 transid2); void removeMarkerForFailedAPI(Signal* signal, Uint32 nodeId, Uint32 bucket); - bool getAllowStartTransaction() const { - if(getNodeState().getSingleUserMode()) - return true; + bool getAllowStartTransaction(Uint32 nodeId, Uint32 table_single_user_mode) const { + if (unlikely(getNodeState().getSingleUserMode())) + { + if (getNodeState().getSingleUserApi() == nodeId || table_single_user_mode) + return true; + else + return false; + } return getNodeState().startLevel < NodeState::SL_STOPPING_2; } diff --git a/storage/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp b/storage/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp index 452ae6d8d70..3bba771f3f0 100644 --- a/storage/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp +++ b/storage/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp @@ -245,7 +245,6 @@ Dbtc::Dbtc(Block_context& ctx): addRecSignal(GSN_INCL_NODEREQ, &Dbtc::execINCL_NODEREQ); addRecSignal(GSN_TIME_SIGNAL, &Dbtc::execTIME_SIGNAL); addRecSignal(GSN_API_FAILREQ, &Dbtc::execAPI_FAILREQ); - addRecSignal(GSN_SET_VAR_REQ, &Dbtc::execSET_VAR_REQ); addRecSignal(GSN_TC_COMMIT_ACK, &Dbtc::execTC_COMMIT_ACK); addRecSignal(GSN_ABORT_ALL_REQ, &Dbtc::execABORT_ALL_REQ); diff --git a/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp b/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp index e8244ce0c66..ce20059e663 100644 --- a/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp +++ 
b/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp @@ -20,6 +20,7 @@ #include <RefConvert.hpp> #include <ndb_limits.h> #include <my_sys.h> +#include <ndb_rand.h> #include <signaldata/EventReport.hpp> #include <signaldata/TcKeyReq.hpp> @@ -343,19 +344,21 @@ void Dbtc::execTC_SCHVERREQ(Signal* signal) tabptr.i = signal->theData[0]; ptrCheckGuard(tabptr, ctabrecFilesize, tableRecord); tabptr.p->currentSchemaVersion = signal->theData[1]; - tabptr.p->storedTable = (bool)signal->theData[2]; + tabptr.p->m_flags = 0; + tabptr.p->set_storedTable((bool)signal->theData[2]); BlockReference retRef = signal->theData[3]; tabptr.p->tableType = (Uint8)signal->theData[4]; BlockReference retPtr = signal->theData[5]; Uint32 noOfKeyAttr = signal->theData[6]; + tabptr.p->singleUserMode = (Uint8)signal->theData[7]; ndbrequire(noOfKeyAttr <= MAX_ATTRIBUTES_IN_INDEX); const KeyDescriptor* desc = g_key_descriptor_pool.getPtr(tabptr.i); ndbrequire(noOfKeyAttr == desc->noOfKeyAttr); - ndbrequire(tabptr.p->enabled == false); - tabptr.p->enabled = true; - tabptr.p->dropping = false; + ndbrequire(tabptr.p->get_enabled() == false); + tabptr.p->set_enabled(true); + tabptr.p->set_dropping(false); tabptr.p->noOfKeyAttr = desc->noOfKeyAttr; tabptr.p->hasCharAttr = desc->hasCharAttr; tabptr.p->noOfDistrKeys = desc->noOfDistrKeys; @@ -379,7 +382,7 @@ Dbtc::execPREP_DROP_TAB_REQ(Signal* signal) Uint32 senderRef = req->senderRef; Uint32 senderData = req->senderData; - if(!tabPtr.p->enabled){ + if(!tabPtr.p->get_enabled()){ jam(); PrepDropTabRef* ref = (PrepDropTabRef*)signal->getDataPtrSend(); ref->senderRef = reference(); @@ -391,7 +394,7 @@ Dbtc::execPREP_DROP_TAB_REQ(Signal* signal) return; } - if(tabPtr.p->dropping){ + if(tabPtr.p->get_dropping()){ jam(); PrepDropTabRef* ref = (PrepDropTabRef*)signal->getDataPtrSend(); ref->senderRef = reference(); @@ -403,7 +406,7 @@ Dbtc::execPREP_DROP_TAB_REQ(Signal* signal) return; } - tabPtr.p->dropping = true; + tabPtr.p->set_dropping(true); tabPtr.p->dropTable.senderRef = senderRef; tabPtr.p->dropTable.senderData = senderData; @@ -439,7 +442,7 @@ Dbtc::execWAIT_DROP_TAB_CONF(Signal* signal) tabPtr.i = conf->tableId; ptrCheckGuard(tabPtr, ctabrecFilesize, tableRecord); - ndbrequire(tabPtr.p->dropping == true); + ndbrequire(tabPtr.p->get_dropping() == true); Uint32 nodeId = refToNode(conf->senderRef); tabPtr.p->dropTable.waitDropTabCount.clearWaitingFor(nodeId); @@ -469,7 +472,7 @@ Dbtc::execWAIT_DROP_TAB_REF(Signal* signal) tabPtr.i = ref->tableId; ptrCheckGuard(tabPtr, ctabrecFilesize, tableRecord); - ndbrequire(tabPtr.p->dropping == true); + ndbrequire(tabPtr.p->get_dropping() == true); Uint32 nodeId = refToNode(ref->senderRef); tabPtr.p->dropTable.waitDropTabCount.clearWaitingFor(nodeId); @@ -506,7 +509,7 @@ Dbtc::checkWaitDropTabFailedLqh(Signal* signal, Uint32 nodeId, Uint32 tableId) for(Uint32 i = 0; i<RT_BREAK && tabPtr.i < ctabrecFilesize; i++, tabPtr.i++){ jam(); ptrAss(tabPtr, tableRecord); - if(tabPtr.p->enabled && tabPtr.p->dropping){ + if(tabPtr.p->get_enabled() && tabPtr.p->get_dropping()){ if(tabPtr.p->dropTable.waitDropTabCount.isWaitingFor(nodeId)){ jam(); conf->senderRef = calcLqhBlockRef(nodeId); @@ -547,7 +550,7 @@ Dbtc::execDROP_TAB_REQ(Signal* signal) Uint32 senderData = req->senderData; DropTabReq::RequestType rt = (DropTabReq::RequestType)req->requestType; - if(!tabPtr.p->enabled && rt == DropTabReq::OnlineDropTab){ + if(!tabPtr.p->get_enabled() && rt == DropTabReq::OnlineDropTab){ jam(); DropTabRef* ref = (DropTabRef*)signal->getDataPtrSend(); ref->senderRef = 
reference(); @@ -559,7 +562,7 @@ Dbtc::execDROP_TAB_REQ(Signal* signal) return; } - if(!tabPtr.p->dropping && rt == DropTabReq::OnlineDropTab){ + if(!tabPtr.p->get_dropping() && rt == DropTabReq::OnlineDropTab){ jam(); DropTabRef* ref = (DropTabRef*)signal->getDataPtrSend(); ref->senderRef = reference(); @@ -571,8 +574,8 @@ Dbtc::execDROP_TAB_REQ(Signal* signal) return; } - tabPtr.p->enabled = false; - tabPtr.p->dropping = false; + tabPtr.p->set_enabled(false); + tabPtr.p->set_dropping(false); DropTabConf * conf = (DropTabConf*)signal->getDataPtrSend(); conf->tableId = tabPtr.i; @@ -1215,16 +1218,14 @@ void Dbtc::execTCSEIZEREQ(Signal* signal) const NodeId senderNodeId = refToNode(tapiBlockref); const bool local = senderNodeId == getOwnNodeId() || senderNodeId == 0; - if(!(senderNodeId == getNodeState().getSingleUserApi()) && - !getNodeState().getSingleUserMode()) { - if(!(sl==NodeState::SL_SINGLEUSER && - senderNodeId == getNodeState().getSingleUserApi())) { + { + { if (!(sl == NodeState::SL_STARTED || (sl == NodeState::SL_STARTING && local == true))) { jam(); - Uint32 errCode; - if(!(sl == NodeState::SL_SINGLEUSER && local)) + Uint32 errCode = 0; + if(!local) { switch(sl){ case NodeState::SL_STARTING: @@ -1232,6 +1233,8 @@ void Dbtc::execTCSEIZEREQ(Signal* signal) break; case NodeState::SL_STOPPING_1: case NodeState::SL_STOPPING_2: + if (getNodeState().getSingleUserMode()) + break; case NodeState::SL_STOPPING_3: case NodeState::SL_STOPPING_4: if(getNodeState().stopping.systemShutdown) @@ -1240,16 +1243,18 @@ void Dbtc::execTCSEIZEREQ(Signal* signal) errCode = ZNODE_SHUTDOWN_IN_PROGRESS; break; case NodeState::SL_SINGLEUSER: - errCode = ZCLUSTER_IN_SINGLEUSER_MODE; break; default: errCode = ZWRONG_STATE; break; } - signal->theData[0] = tapiPointer; - signal->theData[1] = errCode; - sendSignal(tapiBlockref, GSN_TCSEIZEREF, signal, 2, JBB); - return; + if (errCode) + { + signal->theData[0] = tapiPointer; + signal->theData[1] = errCode; + sendSignal(tapiBlockref, GSN_TCSEIZEREF, signal, 2, JBB); + return; + } }//if (!(sl == SL_SINGLEUSER)) } //if } @@ -1737,8 +1742,14 @@ Dbtc::TCKEY_abort(Signal* signal, int place) * Initialize object before starting error handling */ initApiConnectRec(signal, apiConnectptr.p, true); +start_failure: switch(getNodeState().startLevel){ case NodeState::SL_STOPPING_2: + if (getNodeState().getSingleUserMode()) + { + terrorCode = ZCLUSTER_IN_SINGLEUSER_MODE; + break; + } case NodeState::SL_STOPPING_3: case NodeState::SL_STOPPING_4: if(getNodeState().stopping.systemShutdown) @@ -1749,6 +1760,12 @@ Dbtc::TCKEY_abort(Signal* signal, int place) case NodeState::SL_SINGLEUSER: terrorCode = ZCLUSTER_IN_SINGLEUSER_MODE; break; + case NodeState::SL_STOPPING_1: + if (getNodeState().getSingleUserMode()) + { + terrorCode = ZCLUSTER_IN_SINGLEUSER_MODE; + break; + } default: terrorCode = ZWRONG_STATE; break; @@ -1770,6 +1787,13 @@ Dbtc::TCKEY_abort(Signal* signal, int place) return; } + case 60: + { + jam(); + initApiConnectRec(signal, apiConnectptr.p, true); + apiConnectptr.p->m_exec_flag = 1; + goto start_failure; + } default: jam(); systemErrorLab(signal, __LINE__); @@ -1777,9 +1801,18 @@ Dbtc::TCKEY_abort(Signal* signal, int place) }//switch } +static +inline +bool +compare_transid(Uint32* val0, Uint32* val1) +{ + Uint32 tmp0 = val0[0] ^ val1[0]; + Uint32 tmp1 = val0[1] ^ val1[1]; + return (tmp0 | tmp1) == 0; +} + void Dbtc::execKEYINFO(Signal* signal) { - UintR compare_transid1, compare_transid2; jamEntry(); apiConnectptr.i = signal->theData[0]; tmaxData = 20; @@ -1789,10 
+1822,8 @@ void Dbtc::execKEYINFO(Signal* signal) }//if ptrAss(apiConnectptr, apiConnectRecord); ttransid_ptr = 1; - compare_transid1 = apiConnectptr.p->transid[0] ^ signal->theData[1]; - compare_transid2 = apiConnectptr.p->transid[1] ^ signal->theData[2]; - compare_transid1 = compare_transid1 | compare_transid2; - if (compare_transid1 != 0) { + if (compare_transid(apiConnectptr.p->transid, signal->theData+1) == false) + { TCKEY_abort(signal, 19); return; }//if @@ -2093,7 +2124,6 @@ void Dbtc::saveAttrbuf(Signal* signal) void Dbtc::execATTRINFO(Signal* signal) { - UintR compare_transid1, compare_transid2; UintR Tdata1 = signal->theData[0]; UintR Tlength = signal->length(); UintR TapiConnectFilesize = capiConnectFilesize; @@ -2108,17 +2138,13 @@ void Dbtc::execATTRINFO(Signal* signal) return; }//if - UintR Tdata2 = signal->theData[1]; - UintR Tdata3 = signal->theData[2]; ApiConnectRecord * const regApiPtr = &localApiConnectRecord[Tdata1]; - compare_transid1 = regApiPtr->transid[0] ^ Tdata2; - compare_transid2 = regApiPtr->transid[1] ^ Tdata3; apiConnectptr.p = regApiPtr; - compare_transid1 = compare_transid1 | compare_transid2; - if (compare_transid1 != 0) { + if (compare_transid(regApiPtr->transid, signal->theData+1) == false) + { DEBUG("Drop ATTRINFO, wrong transid, length="<<Tlength - << " transid("<<hex<<Tdata2<<", "<<Tdata3); + << " transid("<<hex<<signal->theData[1]<<", "<<signal->theData[2]); TCKEY_abort(signal, 19); return; }//if @@ -2390,6 +2416,7 @@ void Dbtc::initApiConnectRec(Signal* signal, regApiPtr->buddyPtr = RNIL; regApiPtr->currSavePointId = 0; regApiPtr->m_transaction_nodes.clear(); + regApiPtr->singleUserMode = 0; // Trigger data releaseFiredTriggerData(&regApiPtr->theFiredTriggers), // Index data @@ -2499,6 +2526,7 @@ Dbtc::seizeCacheRecord(Signal* signal) /*****************************************************************************/ void Dbtc::execTCKEYREQ(Signal* signal) { + Uint32 sendersNodeId = refToNode(signal->getSendersBlockRef()); UintR compare_transid1, compare_transid2; UintR titcLenAiInTckeyreq; UintR TkeyLength; @@ -2535,16 +2563,19 @@ void Dbtc::execTCKEYREQ(Signal* signal) ApiConnectRecord * const regApiPtr = &localApiConnectRecord[TapiIndex]; apiConnectptr.p = regApiPtr; - Uint32 TstartFlag = tcKeyReq->getStartFlag(Treqinfo); + Uint32 TstartFlag = TcKeyReq::getStartFlag(Treqinfo); Uint32 TexecFlag = TcKeyReq::getExecuteFlag(Treqinfo); Uint8 isIndexOp = regApiPtr->isIndexOp; bool isIndexOpReturn = regApiPtr->indexOpReturn; regApiPtr->isIndexOp = false; // Reset marker regApiPtr->m_exec_flag |= TexecFlag; + TableRecordPtr localTabptr; + localTabptr.i = TtabIndex; + localTabptr.p = &tableRecord[TtabIndex]; switch (regApiPtr->apiConnectstate) { case CS_CONNECTED:{ - if (TstartFlag == 1 && getAllowStartTransaction() == true){ + if (TstartFlag == 1 && getAllowStartTransaction(sendersNodeId, localTabptr.p->singleUserMode) == true){ //--------------------------------------------------------------------- // Initialise API connect record if transaction is started. //--------------------------------------------------------------------- @@ -2552,7 +2583,7 @@ void Dbtc::execTCKEYREQ(Signal* signal) initApiConnectRec(signal, regApiPtr); regApiPtr->m_exec_flag = TexecFlag; } else { - if(getAllowStartTransaction() == true){ + if(getAllowStartTransaction(sendersNodeId, localTabptr.p->singleUserMode) == true){ /*------------------------------------------------------------------ * WE EXPECTED A START TRANSACTION. 
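The compare_transid() helper introduced above folds the two 32-bit transaction-id words into one branch-free equality test, and execKEYINFO/execATTRINFO both switch to it. A small standalone usage sketch (plain stdint types assumed):

#include <cstdint>

static inline bool compare_transid(const uint32_t* val0, const uint32_t* val1)
{
  // XOR of equal words is zero, so OR-ing the two XOR results gives a
  // single zero test instead of two compares and an extra branch.
  const uint32_t tmp0 = val0[0] ^ val1[0];
  const uint32_t tmp1 = val0[1] ^ val1[1];
  return (tmp0 | tmp1) == 0;
}

int main()
{
  const uint32_t current[2]    = { 0x12345678u, 0x9abcdef0u };
  const uint32_t fromSignal[2] = { 0x12345678u, 0x9abcdef0u };
  // A mismatch here is how a stale KEYINFO/ATTRINFO gets dropped
  // (TCKEY_abort(signal, 19) in the hunks above).
  return compare_transid(current, fromSignal) ? 0 : 1;
}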
SINCE NO OPERATIONS HAVE BEEN * RECEIVED WE INDICATE THIS BY SETTING FIRST_TC_CONNECT TO RNIL TO @@ -2562,9 +2593,9 @@ void Dbtc::execTCKEYREQ(Signal* signal) return; } else { /** - * getAllowStartTransaction() == false + * getAllowStartTransaction(sendersNodeId) == false */ - TCKEY_abort(signal, 57); + TCKEY_abort(signal, TexecFlag ? 60 : 57); return; }//if } @@ -2579,6 +2610,13 @@ void Dbtc::execTCKEYREQ(Signal* signal) * the state will be CS_STARTED */ jam(); + if (unlikely(getNodeState().getSingleUserMode()) && + getNodeState().getSingleUserApi() != sendersNodeId && + !localTabptr.p->singleUserMode) + { + TCKEY_abort(signal, TexecFlag ? 60 : 57); + return; + } initApiConnectRec(signal, regApiPtr); regApiPtr->m_exec_flag = TexecFlag; } else { @@ -2599,6 +2637,10 @@ void Dbtc::execTCKEYREQ(Signal* signal) case CS_ABORTING: if (regApiPtr->abortState == AS_IDLE) { if (TstartFlag == 1) { + if(getAllowStartTransaction(sendersNodeId, localTabptr.p->singleUserMode) == false){ + TCKEY_abort(signal, TexecFlag ? 60 : 57); + return; + } //-------------------------------------------------------------------- // Previous transaction had been aborted and the abort was completed. // It is then OK to start a new transaction again. @@ -2662,9 +2704,6 @@ void Dbtc::execTCKEYREQ(Signal* signal) return; }//switch - TableRecordPtr localTabptr; - localTabptr.i = TtabIndex; - localTabptr.p = &tableRecord[TtabIndex]; if (localTabptr.p->checkTable(tcKeyReq->tableSchemaVersion)) { ; } else { @@ -2705,14 +2744,14 @@ void Dbtc::execTCKEYREQ(Signal* signal) /* */ /* ---------------------------------------------------------------------- */ - UintR TapiVersionNo = tcKeyReq->getAPIVersion(tcKeyReq->attrLen); + UintR TapiVersionNo = TcKeyReq::getAPIVersion(tcKeyReq->attrLen); UintR Tlqhkeyreqrec = regApiPtr->lqhkeyreqrec; regApiPtr->lqhkeyreqrec = Tlqhkeyreqrec + 1; regCachePtr->apiVersionNo = TapiVersionNo; UintR TapiConnectptrIndex = apiConnectptr.i; UintR TsenderData = tcKeyReq->senderData; - UintR TattrLen = tcKeyReq->getAttrinfoLen(tcKeyReq->attrLen); + UintR TattrLen = TcKeyReq::getAttrinfoLen(tcKeyReq->attrLen); UintR TattrinfoCount = c_counters.cattrinfoCount; regTcPtr->apiConnect = TapiConnectptrIndex; @@ -2723,6 +2762,8 @@ void Dbtc::execTCKEYREQ(Signal* signal) regTcPtr->savePointId = regApiPtr->currSavePointId; regApiPtr->executingIndexOp = RNIL; + regApiPtr->singleUserMode |= 1 << localTabptr.p->singleUserMode; + if (TcKeyReq::getExecutingTrigger(Treqinfo)) { // Save the TcOperationPtr for firing operation regTcPtr->triggeringOperation = TsenderData; @@ -2738,21 +2779,21 @@ void Dbtc::execTCKEYREQ(Signal* signal) UintR TtabptrIndex = localTabptr.i; UintR TtableSchemaVersion = tcKeyReq->tableSchemaVersion; - Uint8 TOperationType = tcKeyReq->getOperationType(Treqinfo); + Uint8 TOperationType = TcKeyReq::getOperationType(Treqinfo); regCachePtr->tableref = TtabptrIndex; regCachePtr->schemaVersion = TtableSchemaVersion; regTcPtr->operation = TOperationType; - Uint8 TSimpleFlag = tcKeyReq->getSimpleFlag(Treqinfo); - Uint8 TDirtyFlag = tcKeyReq->getDirtyFlag(Treqinfo); - Uint8 TInterpretedFlag = tcKeyReq->getInterpretedFlag(Treqinfo); - Uint8 TDistrKeyFlag = tcKeyReq->getDistributionKeyFlag(Treqinfo); + Uint8 TSimpleFlag = TcKeyReq::getSimpleFlag(Treqinfo); + Uint8 TDirtyFlag = TcKeyReq::getDirtyFlag(Treqinfo); + Uint8 TInterpretedFlag = TcKeyReq::getInterpretedFlag(Treqinfo); + Uint8 TDistrKeyFlag = TcKeyReq::getDistributionKeyFlag(Treqinfo); Uint8 TNoDiskFlag = TcKeyReq::getNoDiskFlag(Treqinfo); Uint8 
TexecuteFlag = TexecFlag; - regCachePtr->opSimple = TSimpleFlag; - regCachePtr->opExec = TInterpretedFlag; regTcPtr->dirtyOp = TDirtyFlag; + regTcPtr->opSimple = TSimpleFlag; + regCachePtr->opExec = TInterpretedFlag; regCachePtr->distributionKeyIndicator = TDistrKeyFlag; regCachePtr->m_no_disk_flag = TNoDiskFlag; @@ -2762,10 +2803,10 @@ void Dbtc::execTCKEYREQ(Signal* signal) Uint32 TkeyIndex; Uint32* TOptionalDataPtr = (Uint32*)&tcKeyReq->scanInfo; { - Uint32 TDistrGHIndex = tcKeyReq->getScanIndFlag(Treqinfo); + Uint32 TDistrGHIndex = TcKeyReq::getScanIndFlag(Treqinfo); Uint32 TDistrKeyIndex = TDistrGHIndex; - Uint32 TscanInfo = tcKeyReq->getTakeOverScanInfo(TOptionalDataPtr[0]); + Uint32 TscanInfo = TcKeyReq::getTakeOverScanInfo(TOptionalDataPtr[0]); regCachePtr->scanTakeOverInd = TDistrGHIndex; regCachePtr->scanInfo = TscanInfo; @@ -2787,7 +2828,7 @@ void Dbtc::execTCKEYREQ(Signal* signal) regCachePtr->keydata[2] = Tdata3; regCachePtr->keydata[3] = Tdata4; - TkeyLength = tcKeyReq->getKeyLength(Treqinfo); + TkeyLength = TcKeyReq::getKeyLength(Treqinfo); Uint32 TAIDataIndex; if (TkeyLength > 8) { TAIDataIndex = TkeyIndex + 8; @@ -2800,7 +2841,7 @@ void Dbtc::execTCKEYREQ(Signal* signal) }//if Uint32* TAIDataPtr = &TOptionalDataPtr[TAIDataIndex]; - titcLenAiInTckeyreq = tcKeyReq->getAIInTcKeyReq(Treqinfo); + titcLenAiInTckeyreq = TcKeyReq::getAIInTcKeyReq(Treqinfo); regCachePtr->keylen = TkeyLength; regCachePtr->lenAiInTckeyreq = titcLenAiInTckeyreq; regCachePtr->currReclenAi = titcLenAiInTckeyreq; @@ -2837,6 +2878,12 @@ void Dbtc::execTCKEYREQ(Signal* signal) tmp.p->apiNodeId = refToNode(regApiPtr->ndbapiBlockref); tmp.p->apiConnectPtr = TapiIndex; tmp.p->noOfLqhs = 0; +#if defined VM_TRACE || defined ERROR_INSERT + { + CommitAckMarkerPtr check; + ndbrequire(!m_commitAckMarkerHash.find(check, *tmp.p)); + } +#endif m_commitAckMarkerHash.add(tmp); } } @@ -2848,7 +2895,7 @@ void Dbtc::execTCKEYREQ(Signal* signal) * THIS VARIABLE CONTROLS THE INTERVAL BETWEEN LCP'S AND * TEMP TABLES DON'T PARTICIPATE. 
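The TableRecord rework earlier in Dbtc.hpp packs the old enabled/dropping/storedTable bytes into a single m_flags word behind get_/set_ accessors; the storedTable test right below switches to one of them. A minimal standalone version of the same bit-flag pattern, written with plain stdint types rather than the kernel's Uint8/Uint16:

#include <cstdint>

struct TableFlags {
  uint16_t m_flags = 0;
  enum {
    TR_ENABLED      = 1 << 0,
    TR_DROPPING     = 1 << 1,
    TR_STORED_TABLE = 1 << 2
  };
  bool get_enabled() const     { return (m_flags & TR_ENABLED) != 0; }
  bool get_dropping() const    { return (m_flags & TR_DROPPING) != 0; }
  bool get_storedTable() const { return (m_flags & TR_STORED_TABLE) != 0; }
  void set_enabled(bool f)     { set(TR_ENABLED, f); }
  void set_dropping(bool f)    { set(TR_DROPPING, f); }
  void set_storedTable(bool f) { set(TR_STORED_TABLE, f); }
private:
  // Set or clear one flag; three booleans now share one 16-bit word.
  void set(uint16_t bit, bool f)
  { m_flags = f ? (m_flags | bit) : (m_flags & ~bit); }
};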
* -------------------------------------------------------------------- */ - if (localTabptr.p->storedTable) { + if (localTabptr.p->get_storedTable()) { coperationsize = ((Toperationsize + TattrLen) + TkeyLength) + 17; } c_counters.cwriteCount = TwriteCount + 1; @@ -2865,14 +2912,14 @@ void Dbtc::execTCKEYREQ(Signal* signal) }//switch }//if - Uint32 TabortOption = tcKeyReq->getAbortOption(Treqinfo); + Uint32 TabortOption = TcKeyReq::getAbortOption(Treqinfo); regTcPtr->m_execAbortOption = TabortOption; /*------------------------------------------------------------------------- * Check error handling per operation * If CommitFlag is set state accordingly and check for early abort *------------------------------------------------------------------------*/ - if (tcKeyReq->getCommitFlag(Treqinfo) == 1) { + if (TcKeyReq::getCommitFlag(Treqinfo) == 1) { ndbrequire(TexecuteFlag); regApiPtr->apiConnectstate = CS_REC_COMMITTING; } else { @@ -3200,9 +3247,10 @@ void Dbtc::sendlqhkeyreq(Signal* signal, LqhKeyReq::setScanTakeOverFlag(tslrAttrLen, regCachePtr->scanTakeOverInd); Tdata10 = 0; - sig0 = regCachePtr->opSimple; + sig0 = regTcPtr->opSimple; sig1 = regTcPtr->operation; - bool simpleRead = (sig1 == ZREAD && sig0 == ZTRUE); + sig2 = regTcPtr->dirtyOp; + bool dirtyRead = (sig1 == ZREAD && sig2 == ZTRUE); LqhKeyReq::setKeyLen(Tdata10, regCachePtr->keylen); LqhKeyReq::setLastReplicaNo(Tdata10, regTcPtr->lastReplicaNo); if (unlikely(version < NDBD_ROWID_VERSION)) @@ -3215,7 +3263,7 @@ void Dbtc::sendlqhkeyreq(Signal* signal, // Indicate Application Reference is present in bit 15 /* ---------------------------------------------------------------------- */ LqhKeyReq::setApplicationAddressFlag(Tdata10, 1); - LqhKeyReq::setDirtyFlag(Tdata10, regTcPtr->dirtyOp); + LqhKeyReq::setDirtyFlag(Tdata10, sig2); LqhKeyReq::setInterpretedFlag(Tdata10, regCachePtr->opExec); LqhKeyReq::setSimpleFlag(Tdata10, sig0); LqhKeyReq::setOperation(Tdata10, sig1); @@ -3276,7 +3324,7 @@ void Dbtc::sendlqhkeyreq(Signal* signal, sig5 = regTcPtr->clientData; sig6 = regCachePtr->scanInfo; - if (! simpleRead) + if (! 
dirtyRead) { regApiPtr->m_transaction_nodes.set(regTcPtr->tcNodedata[0]); regApiPtr->m_transaction_nodes.set(regTcPtr->tcNodedata[1]); @@ -3349,7 +3397,6 @@ void Dbtc::packLqhkeyreq040Lab(Signal* signal, BlockReference TBRef) { TcConnectRecord * const regTcPtr = tcConnectptr.p; - CacheRecord * const regCachePtr = cachePtr.p; #ifdef ERROR_INSERT ApiConnectRecord * const regApiPtr = apiConnectptr.p; if (ERROR_INSERTED(8009)) { @@ -3374,8 +3421,8 @@ void Dbtc::packLqhkeyreq040Lab(Signal* signal, if (anAttrBufIndex == RNIL) { UintR TtcTimer = ctcTimer; UintR Tread = (regTcPtr->operation == ZREAD); - UintR Tsimple = (regCachePtr->opSimple == ZTRUE); - UintR Tboth = Tread & Tsimple; + UintR Tdirty = (regTcPtr->dirtyOp == ZTRUE); + UintR Tboth = Tread & Tdirty; setApiConTimer(apiConnectptr.i, TtcTimer, __LINE__); jam(); /*-------------------------------------------------------------------- @@ -3384,7 +3431,7 @@ void Dbtc::packLqhkeyreq040Lab(Signal* signal, releaseAttrinfo(); if (Tboth) { jam(); - releaseSimpleRead(signal, apiConnectptr, tcConnectptr.p); + releaseDirtyRead(signal, apiConnectptr, tcConnectptr.p); return; }//if regTcPtr->tcConnectstate = OS_OPERATING; @@ -3444,11 +3491,11 @@ void Dbtc::releaseAttrinfo() }//Dbtc::releaseAttrinfo() /* ========================================================================= */ -/* ------- RELEASE ALL RECORDS CONNECTED TO A SIMPLE OPERATION ------- */ +/* ------- RELEASE ALL RECORDS CONNECTED TO A DIRTY OPERATION ------- */ /* ========================================================================= */ -void Dbtc::releaseSimpleRead(Signal* signal, - ApiConnectRecordPtr regApiPtr, - TcConnectRecord* regTcPtr) +void Dbtc::releaseDirtyRead(Signal* signal, + ApiConnectRecordPtr regApiPtr, + TcConnectRecord* regTcPtr) { Uint32 Ttckeyrec = regApiPtr.p->tckeyrec; Uint32 TclientData = regTcPtr->clientData; @@ -3458,7 +3505,7 @@ void Dbtc::releaseSimpleRead(Signal* signal, ConnectionState state = regApiPtr.p->apiConnectstate; regApiPtr.p->tcSendArray[Ttckeyrec] = TclientData; - regApiPtr.p->tcSendArray[Ttckeyrec + 1] = TcKeyConf::SimpleReadBit | Tnode; + regApiPtr.p->tcSendArray[Ttckeyrec + 1] = TcKeyConf::DirtyReadBit | Tnode; regApiPtr.p->tckeyrec = Ttckeyrec + 2; unlinkReadyTcCon(signal); @@ -3488,8 +3535,8 @@ void Dbtc::releaseSimpleRead(Signal* signal, /** * Emulate LQHKEYCONF */ - lqhKeyConf_checkTransactionState(signal, regApiPtr.p); -}//Dbtc::releaseSimpleRead() + lqhKeyConf_checkTransactionState(signal, regApiPtr); +}//Dbtc::releaseDirtyRead() /* ------------------------------------------------------------------------- */ /* ------- CHECK IF ALL TC CONNECTIONS ARE COMPLETED ------- */ @@ -3671,12 +3718,13 @@ void Dbtc::execLQHKEYCONF(Signal* signal) TCKEY_abort(signal, 29); return; }//if - ApiConnectRecord * const regApiPtr = - &localApiConnectRecord[TapiConnectptrIndex]; + Ptr<ApiConnectRecord> regApiPtr; + regApiPtr.i = TapiConnectptrIndex; + regApiPtr.p = &localApiConnectRecord[TapiConnectptrIndex]; apiConnectptr.i = TapiConnectptrIndex; - apiConnectptr.p = regApiPtr; - compare_transid1 = regApiPtr->transid[0] ^ Ttrans1; - compare_transid2 = regApiPtr->transid[1] ^ Ttrans2; + apiConnectptr.p = regApiPtr.p; + compare_transid1 = regApiPtr.p->transid[0] ^ Ttrans1; + compare_transid2 = regApiPtr.p->transid[1] ^ Ttrans2; compare_transid1 = compare_transid1 | compare_transid2; if (compare_transid1 != 0) { warningReport(signal, 24); @@ -3688,25 +3736,25 @@ void Dbtc::execLQHKEYCONF(Signal* signal) systemErrorLab(signal, __LINE__); }//if if 
(ERROR_INSERTED(8003)) { - if (regApiPtr->apiConnectstate == CS_STARTED) { + if (regApiPtr.p->apiConnectstate == CS_STARTED) { CLEAR_ERROR_INSERT_VALUE; return; }//if }//if if (ERROR_INSERTED(8004)) { - if (regApiPtr->apiConnectstate == CS_RECEIVING) { + if (regApiPtr.p->apiConnectstate == CS_RECEIVING) { CLEAR_ERROR_INSERT_VALUE; return; }//if }//if if (ERROR_INSERTED(8005)) { - if (regApiPtr->apiConnectstate == CS_REC_COMMITTING) { + if (regApiPtr.p->apiConnectstate == CS_REC_COMMITTING) { CLEAR_ERROR_INSERT_VALUE; return; }//if }//if if (ERROR_INSERTED(8006)) { - if (regApiPtr->apiConnectstate == CS_START_COMMITTING) { + if (regApiPtr.p->apiConnectstate == CS_START_COMMITTING) { CLEAR_ERROR_INSERT_VALUE; return; }//if @@ -3721,10 +3769,12 @@ void Dbtc::execLQHKEYCONF(Signal* signal) regTcPtr->lastLqhNodeId = refToNode(tlastLqhBlockref); regTcPtr->noFiredTriggers = noFired; - UintR Ttckeyrec = (UintR)regApiPtr->tckeyrec; + UintR Ttckeyrec = (UintR)regApiPtr.p->tckeyrec; UintR TclientData = regTcPtr->clientData; UintR TdirtyOp = regTcPtr->dirtyOp; - ConnectionState TapiConnectstate = regApiPtr->apiConnectstate; + Uint32 TopSimple = regTcPtr->opSimple; + Uint32 Toperation = regTcPtr->operation; + ConnectionState TapiConnectstate = regApiPtr.p->apiConnectstate; if (Ttckeyrec > (ZTCOPCONF_SIZE - 2)) { TCKEY_abort(signal, 30); return; }//if @@ -3749,23 +3799,34 @@ void Dbtc::execLQHKEYCONF(Signal* signal) * since they will enter execLQHKEYCONF a second time * Skip counting internally generated TcKeyReq */ - regApiPtr->tcSendArray[Ttckeyrec] = TclientData; - regApiPtr->tcSendArray[Ttckeyrec + 1] = treadlenAi; - regApiPtr->tckeyrec = Ttckeyrec + 2; + regApiPtr.p->tcSendArray[Ttckeyrec] = TclientData; + regApiPtr.p->tcSendArray[Ttckeyrec + 1] = treadlenAi; + regApiPtr.p->tckeyrec = Ttckeyrec + 2; }//if }//if - if (TdirtyOp == ZTRUE) { - UintR Tlqhkeyreqrec = regApiPtr->lqhkeyreqrec; + if (TdirtyOp == ZTRUE) + { + UintR Tlqhkeyreqrec = regApiPtr.p->lqhkeyreqrec; jam(); releaseDirtyWrite(signal); - regApiPtr->lqhkeyreqrec = Tlqhkeyreqrec - 1; - } else { + regApiPtr.p->lqhkeyreqrec = Tlqhkeyreqrec - 1; + } + else if (Toperation == ZREAD && TopSimple) + { + UintR Tlqhkeyreqrec = regApiPtr.p->lqhkeyreqrec; + jam(); + unlinkReadyTcCon(signal); + releaseTcCon(); + regApiPtr.p->lqhkeyreqrec = Tlqhkeyreqrec - 1; + } + else + { jam(); if (noFired == 0) { jam(); // No triggers to execute - UintR Tlqhkeyconfrec = regApiPtr->lqhkeyconfrec; - regApiPtr->lqhkeyconfrec = Tlqhkeyconfrec + 1; + UintR Tlqhkeyconfrec = regApiPtr.p->lqhkeyconfrec; + regApiPtr.p->lqhkeyconfrec = Tlqhkeyconfrec + 1; regTcPtr->tcConnectstate = OS_PREPARED; } }//if @@ -3795,21 +3856,18 @@ void Dbtc::execLQHKEYCONF(Signal* signal) jam(); if (regTcPtr->isIndexOp) { jam(); - setupIndexOpReturn(regApiPtr, regTcPtr); + setupIndexOpReturn(regApiPtr.p, regTcPtr); } lqhKeyConf_checkTransactionState(signal, regApiPtr); } else { // We have fired triggers jam(); saveTriggeringOpState(signal, regTcPtr); - if (regTcPtr->noReceivedTriggers == noFired) { - ApiConnectRecordPtr transPtr; - + if (regTcPtr->noReceivedTriggers == noFired) + { // We have received all data jam(); - transPtr.i = TapiConnectptrIndex; - transPtr.p = regApiPtr; - executeTriggers(signal, &transPtr); + executeTriggers(signal, &regApiPtr); } // else wait for more trigger data } @@ -3833,7 +3891,7 @@ void Dbtc::setupIndexOpReturn(ApiConnectRecord* regApiPtr, */ void Dbtc::lqhKeyConf_checkTransactionState(Signal * signal, - ApiConnectRecord * const apiConnectPtrP) + Ptr<ApiConnectRecord> 
regApiPtr) { /*---------------------------------------------------------------*/ /* IF THE COMMIT FLAG IS SET IN SIGNAL TCKEYREQ THEN DBTC HAS TO */ @@ -3844,9 +3902,9 @@ Dbtc::lqhKeyConf_checkTransactionState(Signal * signal, /* FOR ALL OPERATIONS, AND THEN WAIT FOR THE API TO CONCLUDE THE */ /* TRANSACTION */ /*---------------------------------------------------------------*/ - ConnectionState TapiConnectstate = apiConnectPtrP->apiConnectstate; - UintR Tlqhkeyconfrec = apiConnectPtrP->lqhkeyconfrec; - UintR Tlqhkeyreqrec = apiConnectPtrP->lqhkeyreqrec; + ConnectionState TapiConnectstate = regApiPtr.p->apiConnectstate; + UintR Tlqhkeyconfrec = regApiPtr.p->lqhkeyconfrec; + UintR Tlqhkeyreqrec = regApiPtr.p->lqhkeyreqrec; int TnoOfOutStanding = Tlqhkeyreqrec - Tlqhkeyconfrec; switch (TapiConnectstate) { @@ -3856,11 +3914,11 @@ Dbtc::lqhKeyConf_checkTransactionState(Signal * signal, diverify010Lab(signal); return; } else if (TnoOfOutStanding > 0) { - if (apiConnectPtrP->tckeyrec == ZTCOPCONF_SIZE) { + if (regApiPtr.p->tckeyrec == ZTCOPCONF_SIZE) { jam(); sendtckeyconf(signal, 0); return; - } else if (apiConnectPtrP->indexOpReturn) { + } else if (regApiPtr.p->indexOpReturn) { jam(); sendtckeyconf(signal, 0); return; @@ -3879,11 +3937,11 @@ Dbtc::lqhKeyConf_checkTransactionState(Signal * signal, sendtckeyconf(signal, 2); return; } else { - if (apiConnectPtrP->tckeyrec == ZTCOPCONF_SIZE) { + if (regApiPtr.p->tckeyrec == ZTCOPCONF_SIZE) { jam(); sendtckeyconf(signal, 0); return; - } else if (apiConnectPtrP->indexOpReturn) { + } else if (regApiPtr.p->indexOpReturn) { jam(); sendtckeyconf(signal, 0); return; @@ -3893,11 +3951,11 @@ Dbtc::lqhKeyConf_checkTransactionState(Signal * signal, return; case CS_REC_COMMITTING: if (TnoOfOutStanding > 0) { - if (apiConnectPtrP->tckeyrec == ZTCOPCONF_SIZE) { + if (regApiPtr.p->tckeyrec == ZTCOPCONF_SIZE) { jam(); sendtckeyconf(signal, 0); return; - } else if (apiConnectPtrP->indexOpReturn) { + } else if (regApiPtr.p->indexOpReturn) { jam(); sendtckeyconf(signal, 0); return; @@ -3914,7 +3972,7 @@ Dbtc::lqhKeyConf_checkTransactionState(Signal * signal, /* CONSISTING OF DIRTY WRITES AND ALL OF THOSE WERE */ /* COMPLETED. ENSURE TCKEYREC IS ZERO TO PREVENT ERRORS. */ /*---------------------------------------------------------------*/ - apiConnectPtrP->tckeyrec = 0; + regApiPtr.p->tckeyrec = 0; return; default: TCKEY_abort(signal, 46); @@ -4172,34 +4230,46 @@ void Dbtc::diverify010Lab(Signal* signal) jam(); systemErrorLab(signal, __LINE__); }//if - if (TfirstfreeApiConnectCopy != RNIL) { - seizeApiConnectCopy(signal); - regApiPtr->apiConnectstate = CS_PREPARE_TO_COMMIT; - /*----------------------------------------------------------------------- - * WE COME HERE ONLY IF THE TRANSACTION IS PREPARED ON ALL TC CONNECTIONS. - * THUS WE CAN START THE COMMIT PHASE BY SENDING DIVERIFY ON ALL TC - * CONNECTIONS AND THEN WHEN ALL DIVERIFYCONF HAVE BEEN RECEIVED THE - * COMMIT MESSAGE CAN BE SENT TO ALL INVOLVED PARTS. 
- *-----------------------------------------------------------------------*/ - EXECUTE_DIRECT(DBDIH, GSN_DIVERIFYREQ, signal, 1); - if (signal->theData[2] == 0) { - execDIVERIFYCONF(signal); + + if (regApiPtr->lqhkeyreqrec) + { + if (TfirstfreeApiConnectCopy != RNIL) { + seizeApiConnectCopy(signal); + regApiPtr->apiConnectstate = CS_PREPARE_TO_COMMIT; + /*----------------------------------------------------------------------- + * WE COME HERE ONLY IF THE TRANSACTION IS PREPARED ON ALL TC CONNECTIONS + * THUS WE CAN START THE COMMIT PHASE BY SENDING DIVERIFY ON ALL TC + * CONNECTIONS AND THEN WHEN ALL DIVERIFYCONF HAVE BEEN RECEIVED THE + * COMMIT MESSAGE CAN BE SENT TO ALL INVOLVED PARTS. + *---------------------------------------------------------------------*/ + EXECUTE_DIRECT(DBDIH, GSN_DIVERIFYREQ, signal, 1); + if (signal->theData[2] == 0) { + execDIVERIFYCONF(signal); + } + return; + } else { + /*----------------------------------------------------------------------- + * There were no free copy connections available. We must abort the + * transaction since otherwise we will have a problem with the report + * to the application. + * This should more or less not happen but if it happens we do + * not want to crash and we do not want to create code to handle it + * properly since it is difficult to test it and will be complex to + * handle a problem more or less not occurring. + *---------------------------------------------------------------------*/ + terrorCode = ZSEIZE_API_COPY_ERROR; + abortErrorLab(signal); + return; } - return; - } else { - /*----------------------------------------------------------------------- - * There were no free copy connections available. We must abort the - * transaction since otherwise we will have a problem with the report - * to the application. - * This should more or less not happen but if it happens we do not want to - * crash and we do not want to create code to handle it properly since - * it is difficult to test it and will be complex to handle a problem - * more or less not occurring. 
- *-----------------------------------------------------------------------*/ - terrorCode = ZSEIZE_API_COPY_ERROR; - abortErrorLab(signal); - return; - }//if + } + else + { + jam(); + sendtckeyconf(signal, 1); + regApiPtr->apiConnectstate = CS_CONNECTED; + regApiPtr->m_transaction_nodes.clear(); + setApiConTimer(apiConnectptr.i, 0,__LINE__); + } }//Dbtc::diverify010Lab() /* ------------------------------------------------------------------------- */ @@ -4681,6 +4751,7 @@ void Dbtc::copyApi(Signal* signal) regApiPtr->lqhkeyconfrec = Tlqhkeyconfrec; regApiPtr->commitAckMarker = TcommitAckMarker; regApiPtr->m_transaction_nodes = Tnodes; + regApiPtr->singleUserMode = 0; gcpPtr.i = TgcpPointer; ptrCheckGuard(gcpPtr, TgcpFilesize, localGcpRecord); @@ -4692,6 +4763,7 @@ void Dbtc::copyApi(Signal* signal) regTmpApiPtr->firstTcConnect = RNIL; regTmpApiPtr->lastTcConnect = RNIL; regTmpApiPtr->m_transaction_nodes.clear(); + regTmpApiPtr->singleUserMode = 0; releaseAllSeizedIndexOperations(regTmpApiPtr); }//Dbtc::copyApi() @@ -5212,16 +5284,8 @@ void Dbtc::execLQHKEYREF(Signal* signal) regApiPtr->lqhkeyreqrec--; if (regApiPtr->lqhkeyconfrec == regApiPtr->lqhkeyreqrec) { if (regApiPtr->apiConnectstate == CS_START_COMMITTING) { - if(regApiPtr->lqhkeyconfrec) { - jam(); - diverify010Lab(signal); - } else { - jam(); - sendtckeyconf(signal, 1); - regApiPtr->apiConnectstate = CS_CONNECTED; - regApiPtr->m_transaction_nodes.clear(); - setApiConTimer(apiConnectptr.i, 0,__LINE__); - } + jam(); + diverify010Lab(signal); return; } else if (regApiPtr->tckeyrec > 0 || regApiPtr->m_exec_flag) { jam(); @@ -5410,11 +5474,32 @@ void Dbtc::execTC_COMMITREQ(Signal* signal) } }//Dbtc::execTC_COMMITREQ() +/** + * TCROLLBACKREQ + * + * Format is: + * + * thedata[0] = apiconnectptr + * thedata[1] = transid[0] + * thedata[2] = transid[1] + * OPTIONAL thedata[3] = flags + * + * Flags: + * 0x1 = potentiallyBad data from API (try not to assert) + */ void Dbtc::execTCROLLBACKREQ(Signal* signal) { + bool potentiallyBad= false; UintR compare_transid1, compare_transid2; jamEntry(); + + if(unlikely((signal->getLength() >= 4) && (signal->theData[3] & 0x1))) + { + ndbout_c("Trying to roll back potentially bad txn\n"); + potentiallyBad= true; + } + apiConnectptr.i = signal->theData[0]; if (apiConnectptr.i >= capiConnectFilesize) { goto TC_ROLL_warning; @@ -5501,12 +5586,14 @@ void Dbtc::execTCROLLBACKREQ(Signal* signal) TC_ROLL_warning: jam(); - warningHandlerLab(signal, __LINE__); + if(likely(potentiallyBad==false)) + warningHandlerLab(signal, __LINE__); return; TC_ROLL_system_error: jam(); - systemErrorLab(signal, __LINE__); + if(likely(potentiallyBad==false)) + systemErrorLab(signal, __LINE__); return; }//Dbtc::execTCROLLBACKREQ() @@ -6161,9 +6248,11 @@ and otherwise we spread it out 310 ms. 
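timeOutLoopStartLab() below derives a jitter mask from the configured timeout and now adds ndb_rand() & mask_value to each check, so a batch of transactions started together does not all expire on the same tick. A standalone sketch of that jitter, with std::rand() standing in for ndb_rand():

#include <cstdlib>

// Mask selection mirrors the block below: coarse jitter for long
// timeouts, fine jitter for short ones.
static unsigned jitter_mask(unsigned timeout_param)
{
  if (timeout_param > 300) return 63;
  if (timeout_param < 30)  return 7;
  return 31;
}

static unsigned jittered_timeout(unsigned timeout_param)
{
  return timeout_param + (std::rand() & jitter_mask(timeout_param));
}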
void Dbtc::timeOutLoopStartLab(Signal* signal, Uint32 api_con_ptr) { Uint32 end_ptr, time_passed, time_out_value, mask_value; + Uint32 old_mask_value= 0; const Uint32 api_con_sz= capiConnectFilesize; const Uint32 tc_timer= ctcTimer; const Uint32 time_out_param= ctimeOutValue; + const Uint32 old_time_out_param= c_abortRec.oldTimeOutValue; ctimeOutCheckHeartbeat = tc_timer; @@ -6184,16 +6273,50 @@ void Dbtc::timeOutLoopStartLab(Signal* signal, Uint32 api_con_ptr) jam(); mask_value= 31; } + if (time_out_param != old_time_out_param && + getNodeState().getSingleUserMode()) + { + // abort during single user mode, use old_mask_value as flag + // and calculate value to be used for connections with allowed api + if (old_time_out_param > 300) { + jam(); + old_mask_value= 63; + } else if (old_time_out_param < 30) { + jam(); + old_mask_value= 7; + } else { + jam(); + old_mask_value= 31; + } + } for ( ; api_con_ptr < end_ptr; api_con_ptr++) { Uint32 api_timer= getApiConTimer(api_con_ptr); jam(); if (api_timer != 0) { - time_out_value= time_out_param + (api_con_ptr & mask_value); + Uint32 error= ZTIME_OUT_ERROR; + time_out_value= time_out_param + (ndb_rand() & mask_value); + if (unlikely(old_mask_value)) // abort during single user mode + { + apiConnectptr.i = api_con_ptr; + ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord); + if ((getNodeState().getSingleUserApi() == + refToNode(apiConnectptr.p->ndbapiBlockref)) || + !(apiConnectptr.p->singleUserMode & (1 << NDB_SUM_LOCKED))) + { + // api allowed during single user, use original timeout + time_out_value= + old_time_out_param + (api_con_ptr & old_mask_value); + } + else + { + error= ZCLUSTER_IN_SINGLEUSER_MODE; + } + } time_passed= tc_timer - api_timer; if (time_passed > time_out_value) { jam(); - timeOutFoundLab(signal, api_con_ptr, ZTIME_OUT_ERROR); + timeOutFoundLab(signal, api_con_ptr, error); api_con_ptr++; break; } @@ -6233,7 +6356,8 @@ void Dbtc::timeOutFoundLab(Signal* signal, Uint32 TapiConPtr, Uint32 errCode) << " code: " << errCode); switch (apiConnectptr.p->apiConnectstate) { case CS_STARTED: - if(apiConnectptr.p->lqhkeyreqrec == apiConnectptr.p->lqhkeyconfrec){ + if(apiConnectptr.p->lqhkeyreqrec == apiConnectptr.p->lqhkeyconfrec && + errCode != ZCLUSTER_IN_SINGLEUSER_MODE){ jam(); /* We are waiting for application to continue the transaction. In this @@ -6806,6 +6930,33 @@ void Dbtc::timeOutFoundFragLab(Signal* signal, UintR TscanConPtr) c_scan_frag_pool.getPtr(ptr, TscanConPtr); DEBUG(TscanConPtr << " timeOutFoundFragLab: scanFragState = "<< ptr.p->scanFragState); + const Uint32 time_out_param= ctimeOutValue; + const Uint32 old_time_out_param= c_abortRec.oldTimeOutValue; + + if (unlikely(time_out_param != old_time_out_param && + getNodeState().getSingleUserMode())) + { + jam(); + ScanRecordPtr scanptr; + scanptr.i = ptr.p->scanRec; + ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord); + ApiConnectRecordPtr TlocalApiConnectptr; + TlocalApiConnectptr.i = scanptr.p->scanApiRec; + ptrCheckGuard(TlocalApiConnectptr, capiConnectFilesize, apiConnectRecord); + + if (refToNode(TlocalApiConnectptr.p->ndbapiBlockref) == + getNodeState().getSingleUserApi()) + { + jam(); + Uint32 val = ctcTimer - ptr.p->scanFragTimer; + if (val <= old_time_out_param) + { + jam(); + goto next; + } + } + } + /*-------------------------------------------------------------------------*/ // The scan fragment has expired its timeout. Check its state to decide // what to do. 
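A note on the diverify010Lab rework above: the commit entry point now branches three ways. A transaction that actually ran operations (lqhkeyreqrec != 0) seizes an API connect copy and starts the DIVERIFY round through DBDIH; if no copy record is free it aborts with ZSEIZE_API_COPY_ERROR rather than crashing; and a transaction with no operations is confirmed immediately, which is why the execLQHKEYREF hunk can now call diverify010Lab unconditionally. A self-contained toy of the branch (names are schematic, not the block's API):

#include <cstdint>

enum TxState { CS_CONNECTED, CS_PREPARE_TO_COMMIT, CS_ABORTING };
enum Outcome { DIVERIFY_SENT, CONFIRMED_EMPTY, ABORTED_NO_COPY };
struct Tx { uint32_t lqhkeyreqrec; TxState state; };

Outcome commit_entry(Tx& tx, bool haveFreeCopyConnection)
{
  if (tx.lqhkeyreqrec != 0)            // transaction did real work
  {
    if (haveFreeCopyConnection)
    {
      tx.state = CS_PREPARE_TO_COMMIT; // then DIVERIFYREQ to DBDIH
      return DIVERIFY_SENT;
    }
    tx.state = CS_ABORTING;            // ZSEIZE_API_COPY_ERROR path
    return ABORTED_NO_COPY;
  }
  tx.state = CS_CONNECTED;             // empty txn: TCKEYCONF at once
  return CONFIRMED_EMPTY;
}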
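The TCROLLBACKREQ format comment above documents an optional fourth word. A minimal packer for that layout (toy code; the receiver checks signal->getLength() >= 4 before reading theData[3], so old three-word senders stay valid):

#include <cstdint>

// Returns the signal length in words; flag 0x1 = potentiallyBad, which
// downgrades the warning/system-error escalation on malformed input.
uint32_t pack_tcrollbackreq(uint32_t theData[4], uint32_t apiConnectPtr,
                            uint32_t transid0, uint32_t transid1,
                            bool potentiallyBad)
{
  theData[0] = apiConnectPtr;
  theData[1] = transid0;
  theData[2] = transid1;
  if (!potentiallyBad)
    return 3;                 // classic three-word form
  theData[3] = 0x1;
  return 4;
}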
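Two behavioural changes hide in the timeout scan above: the per-connection spread is now randomized (ndb_rand() & mask instead of the connection index), and during single user mode the pre-abort timeout keeps applying to the privileged API node while other connections expire with ZCLUSTER_IN_SINGLEUSER_MODE instead of ZTIME_OUT_ERROR. The mask selection, isolated:

#include <cstdint>

// Thresholds as in the hunk: long configured timeouts get a wide
// random spread (64 ticks), short ones a tight spread (8 ticks).
uint32_t timeout_jitter_mask(uint32_t time_out_param)
{
  if (time_out_param > 300) return 63;
  if (time_out_param < 30)  return 7;
  return 31;
}
// per-connection deadline: time_out_param + (ndb_rand() & mask)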
@@ -6867,6 +7018,7 @@ void Dbtc::timeOutFoundFragLab(Signal* signal, UintR TscanConPtr) break; }//switch +next: signal->theData[0] = TcContinueB::ZCONTINUE_TIME_OUT_FRAG_CONTROL; signal->theData[1] = TscanConPtr + 1; sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB); @@ -6946,7 +7098,6 @@ void Dbtc::execGCP_NOMORETRANS(Signal* signal) /*****************************************************************************/ void Dbtc::execNODE_FAILREP(Signal* signal) { - HostRecordPtr tmpHostptr; jamEntry(); NodeFailRep * const nodeFail = (NodeFailRep *)&signal->theData[0]; @@ -8086,6 +8237,7 @@ void Dbtc::initApiConnectFail(Signal* signal) apiConnectptr.p->ndbapiConnect = 0; apiConnectptr.p->buddyPtr = RNIL; apiConnectptr.p->m_transaction_nodes.clear(); + apiConnectptr.p->singleUserMode = 0; setApiConTimer(apiConnectptr.i, 0, __LINE__); switch(ttransStatus){ case LqhTransConf::Committed: @@ -8124,6 +8276,13 @@ void Dbtc::initApiConnectFail(Signal* signal) tmp.p->noOfLqhs = 1; tmp.p->lqhNodeId[0] = tnodeid; tmp.p->apiConnectPtr = apiConnectptr.i; + +#if defined VM_TRACE || defined ERROR_INSERT + { + CommitAckMarkerPtr check; + ndbrequire(!m_commitAckMarkerHash.find(check, *tmp.p)); + } +#endif m_commitAckMarkerHash.add(tmp); } }//Dbtc::initApiConnectFail() @@ -8280,6 +8439,12 @@ void Dbtc::updateApiStateFail(Signal* signal) tmp.p->noOfLqhs = 1; tmp.p->lqhNodeId[0] = tnodeid; tmp.p->apiConnectPtr = apiConnectptr.i; +#if defined VM_TRACE || defined ERROR_INSERT + { + CommitAckMarkerPtr check; + ndbrequire(!m_commitAckMarkerHash.find(check, *tmp.p)); + } +#endif m_commitAckMarkerHash.add(tmp); } else { jam(); @@ -8685,6 +8850,14 @@ void Dbtc::execSCAN_TABREQ(Signal* signal) } } + if (getNodeState().startLevel == NodeState::SL_SINGLEUSER && + getNodeState().getSingleUserApi() != + refToNode(apiConnectptr.p->ndbapiBlockref)) + { + errCode = ZCLUSTER_IN_SINGLEUSER_MODE; + goto SCAN_TAB_error; + } + seizeTcConnect(signal); tcConnectptr.p->apiConnect = apiConnectptr.i; tcConnectptr.p->tcConnectstate = OS_WAIT_SCAN; @@ -9980,6 +10153,7 @@ void Dbtc::initApiConnect(Signal* signal) apiConnectptr.p->buddyPtr = RNIL; apiConnectptr.p->currSavePointId = 0; apiConnectptr.p->m_transaction_nodes.clear(); + apiConnectptr.p->singleUserMode = 0; }//for apiConnectptr.i = tiacTmp - 1; ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord); @@ -10008,6 +10182,7 @@ void Dbtc::initApiConnect(Signal* signal) apiConnectptr.p->buddyPtr = RNIL; apiConnectptr.p->currSavePointId = 0; apiConnectptr.p->m_transaction_nodes.clear(); + apiConnectptr.p->singleUserMode = 0; }//for apiConnectptr.i = (2 * tiacTmp) - 1; ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord); @@ -10036,6 +10211,7 @@ void Dbtc::initApiConnect(Signal* signal) apiConnectptr.p->buddyPtr = RNIL; apiConnectptr.p->currSavePointId = 0; apiConnectptr.p->m_transaction_nodes.clear(); + apiConnectptr.p->singleUserMode = 0; }//for apiConnectptr.i = (3 * tiacTmp) - 1; ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord); @@ -10221,10 +10397,11 @@ void Dbtc::initTable(Signal* signal) refresh_watch_dog(); ptrAss(tabptr, tableRecord); tabptr.p->currentSchemaVersion = 0; - tabptr.p->storedTable = true; + tabptr.p->m_flags = 0; + tabptr.p->set_storedTable(true); tabptr.p->tableType = 0; - tabptr.p->enabled = false; - tabptr.p->dropping = false; + tabptr.p->set_enabled(false); + tabptr.p->set_dropping(false); tabptr.p->noOfKeyAttr = 0; tabptr.p->hasCharAttr = 0; tabptr.p->noOfDistrKeys = 0; @@ -10358,6 +10535,7 @@ void 
Dbtc::releaseAbortResources(Signal* signal) apiConnectptr.p->firstTcConnect = RNIL; apiConnectptr.p->lastTcConnect = RNIL; apiConnectptr.p->m_transaction_nodes.clear(); + apiConnectptr.p->singleUserMode = 0; // MASV let state be CS_ABORTING until all // signals in the "air" have been received. Reset to CS_CONNECTED @@ -10997,36 +11175,6 @@ Dbtc::execDUMP_STATE_ORD(Signal* signal) } }//Dbtc::execDUMP_STATE_ORD() -void Dbtc::execSET_VAR_REQ(Signal* signal) -{ -#if 0 - SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0]; - ConfigParamId var = setVarReq->variable(); - int val = setVarReq->value(); - - - switch (var) { - - case TransactionInactiveTime: - jam(); - set_appl_timeout_value(val); - break; - case TransactionDeadlockDetectionTimeout: - set_timeout_value(val); - sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB); - break; - - case NoOfConcurrentProcessesHandleTakeover: - set_no_parallel_takeover(val); - sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB); - break; - - default: - sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB); - } // switch -#endif -} - void Dbtc::execABORT_ALL_REQ(Signal* signal) { jamEntry(); @@ -11036,7 +11184,7 @@ void Dbtc::execABORT_ALL_REQ(Signal* signal) const Uint32 senderData = req->senderData; const BlockReference senderRef = req->senderRef; - if(getAllowStartTransaction() == true && !getNodeState().getSingleUserMode()){ + if(getAllowStartTransaction(refToNode(senderRef), 0) == true && !getNodeState().getSingleUserMode()){ jam(); ref->senderData = senderData; @@ -11460,10 +11608,22 @@ void Dbtc::execTCINDXREQ(Signal* signal) // This is a newly started transaction, clean-up releaseAllSeizedIndexOperations(regApiPtr); + regApiPtr->apiConnectstate = CS_STARTED; regApiPtr->transid[0] = tcIndxReq->transId1; regApiPtr->transid[1] = tcIndxReq->transId2; }//if + if (getNodeState().startLevel == NodeState::SL_SINGLEUSER && + getNodeState().getSingleUserApi() != + refToNode(regApiPtr->ndbapiBlockref)) + { + terrorCode = ZCLUSTER_IN_SINGLEUSER_MODE; + regApiPtr->m_exec_flag |= TcKeyReq::getExecuteFlag(tcIndxRequestInfo); + apiConnectptr = transPtr; + abortErrorLab(signal); + return; + } + if (ERROR_INSERTED(8036) || !seizeIndexOperation(regApiPtr, indexOpPtr)) { jam(); // Failed to allocate index operation @@ -11484,25 +11644,34 @@ void Dbtc::execTCINDXREQ(Signal* signal) // If operation is readTupleExclusive or updateTuple then read index // table with exclusive lock Uint32 indexLength = TcKeyReq::getKeyLength(tcIndxRequestInfo); - Uint32 attrLength = tcIndxReq->attrLen; + Uint32 attrLength = TcKeyReq::getAttrinfoLen(tcIndxReq->attrLen); indexOp->expectedKeyInfo = indexLength; Uint32 includedIndexLength = MIN(indexLength, indexBufSize); indexOp->expectedAttrInfo = attrLength; Uint32 includedAttrLength = MIN(attrLength, attrBufSize); - if (saveINDXKEYINFO(signal, - indexOp, - dataPtr, - includedIndexLength)) { + + int ret; + if ((ret = saveINDXKEYINFO(signal, + indexOp, + dataPtr, + includedIndexLength)) == 0) + { jam(); // We have received all we need readIndexTable(signal, regApiPtr, indexOp); return; } + else if (ret == -1) + { + jam(); + return; + } + dataPtr += includedIndexLength; if (saveINDXATTRINFO(signal, indexOp, dataPtr, - includedAttrLength)) { + includedAttrLength) == 0) { jam(); // We have received all we need readIndexTable(signal, regApiPtr, indexOp); @@ -11605,13 +11774,25 @@ void Dbtc::execINDXKEYINFO(Signal* signal) TcIndexOperationPtr indexOpPtr; TcIndexOperation* indexOp; + if (compare_transid(regApiPtr->transid, 
indxKeyInfo->transId) == false) + { + TCKEY_abort(signal, 19); + return; + } + + if (regApiPtr->apiConnectstate == CS_ABORTING) + { + jam(); + return; + } + if((indexOpPtr.i = regApiPtr->accumulatingIndexOp) != RNIL) { indexOp = c_theIndexOperationPool.getPtr(indexOpPtr.i); if (saveINDXKEYINFO(signal, indexOp, src, - keyInfoLength)) { + keyInfoLength) == 0) { jam(); // We have received all we need readIndexTable(signal, regApiPtr, indexOp); @@ -11638,17 +11819,31 @@ void Dbtc::execINDXATTRINFO(Signal* signal) TcIndexOperationPtr indexOpPtr; TcIndexOperation* indexOp; + if (compare_transid(regApiPtr->transid, indxAttrInfo->transId) == false) + { + TCKEY_abort(signal, 19); + return; + } + + if (regApiPtr->apiConnectstate == CS_ABORTING) + { + jam(); + return; + } + if((indexOpPtr.i = regApiPtr->accumulatingIndexOp) != RNIL) { indexOp = c_theIndexOperationPool.getPtr(indexOpPtr.i); if (saveINDXATTRINFO(signal, indexOp, src, - attrInfoLength)) { + attrInfoLength) == 0) { jam(); // We have received all we need readIndexTable(signal, regApiPtr, indexOp); + return; } + return; } } @@ -11656,12 +11851,13 @@ void Dbtc::execINDXATTRINFO(Signal* signal) * Save signal INDXKEYINFO * Return true if we have received all needed data */ -bool Dbtc::saveINDXKEYINFO(Signal* signal, - TcIndexOperation* indexOp, - const Uint32 *src, - Uint32 len) +int +Dbtc::saveINDXKEYINFO(Signal* signal, + TcIndexOperation* indexOp, + const Uint32 *src, + Uint32 len) { - if (!indexOp->keyInfo.append(src, len)) { + if (ERROR_INSERTED(8052) || !indexOp->keyInfo.append(src, len)) { jam(); // Failed to seize keyInfo, abort transaction #ifdef VM_TRACE @@ -11671,15 +11867,17 @@ bool Dbtc::saveINDXKEYINFO(Signal* signal, apiConnectptr.i = indexOp->connectionIndex; ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord); releaseIndexOperation(apiConnectptr.p, indexOp); - terrorCode = 4000; + terrorCode = 289; + if(TcKeyReq::getExecuteFlag(indexOp->tcIndxReq.requestInfo)) + apiConnectptr.p->m_exec_flag= 1; abortErrorLab(signal); - return false; + return -1; } if (receivedAllINDXKEYINFO(indexOp) && receivedAllINDXATTRINFO(indexOp)) { jam(); - return true; + return 0; } - return false; + return 1; } bool Dbtc::receivedAllINDXKEYINFO(TcIndexOperation* indexOp) @@ -11691,12 +11889,13 @@ bool Dbtc::receivedAllINDXKEYINFO(TcIndexOperation* indexOp) * Save signal INDXATTRINFO * Return true if we have received all needed data */ -bool Dbtc::saveINDXATTRINFO(Signal* signal, - TcIndexOperation* indexOp, - const Uint32 *src, - Uint32 len) +int +Dbtc::saveINDXATTRINFO(Signal* signal, + TcIndexOperation* indexOp, + const Uint32 *src, + Uint32 len) { - if (!indexOp->attrInfo.append(src, len)) { + if (ERROR_INSERTED(8051) || !indexOp->attrInfo.append(src, len)) { jam(); #ifdef VM_TRACE ndbout_c("Dbtc::saveINDXATTRINFO: Failed to seize attrInfo\n"); @@ -11704,15 +11903,17 @@ bool Dbtc::saveINDXATTRINFO(Signal* signal, apiConnectptr.i = indexOp->connectionIndex; ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord); releaseIndexOperation(apiConnectptr.p, indexOp); - terrorCode = 4000; + terrorCode = 289; + if(TcKeyReq::getExecuteFlag(indexOp->tcIndxReq.requestInfo)) + apiConnectptr.p->m_exec_flag= 1; abortErrorLab(signal); - return false; + return -1; } if (receivedAllINDXKEYINFO(indexOp) && receivedAllINDXATTRINFO(indexOp)) { jam(); - return true; + return 0; } - return false; + return 1; } bool Dbtc::receivedAllINDXATTRINFO(TcIndexOperation* indexOp) @@ -11864,8 +12065,6 @@ void Dbtc::execTCKEYREF(Signal* signal) } const 
UintR TconnectIndex = indexOp->connectionIndex; ApiConnectRecord * const regApiPtr = &apiConnectRecord[TconnectIndex]; - Uint32 tcKeyRequestInfo = indexOp->tcIndxReq.requestInfo; - Uint32 commitFlg = TcKeyReq::getCommitFlag(tcKeyRequestInfo); switch(indexOp->indexOpState) { case(IOS_NOOP): { @@ -11898,6 +12097,9 @@ void Dbtc::execTCKEYREF(Signal* signal) tcIndxRef->transId[0] = tcKeyRef->transId[0]; tcIndxRef->transId[1] = tcKeyRef->transId[1]; tcIndxRef->errorCode = tcKeyRef->errorCode; + + releaseIndexOperation(regApiPtr, indexOp); + sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal, TcKeyRef::SignalLength, JBB); return; @@ -12442,7 +12644,18 @@ void Dbtc::executeIndexOperation(Signal* signal, bool Dbtc::seizeIndexOperation(ApiConnectRecord* regApiPtr, TcIndexOperationPtr& indexOpPtr) { - return regApiPtr->theSeizedIndexOperations.seize(indexOpPtr); + if (regApiPtr->theSeizedIndexOperations.seize(indexOpPtr)) + { + ndbassert(indexOpPtr.p->expectedKeyInfo == 0); + ndbassert(indexOpPtr.p->keyInfo.getSize() == 0); + ndbassert(indexOpPtr.p->expectedAttrInfo == 0); + ndbassert(indexOpPtr.p->attrInfo.getSize() == 0); + ndbassert(indexOpPtr.p->expectedTransIdAI == 0); + ndbassert(indexOpPtr.p->transIdAI.getSize() == 0); + return true; + } + + return false; } void Dbtc::releaseIndexOperation(ApiConnectRecord* regApiPtr, @@ -13304,9 +13517,9 @@ void Dbtc::deleteFromIndexTable(Signal* signal, Uint32 Dbtc::TableRecord::getErrorCode(Uint32 schemaVersion) const { - if(!enabled) + if(!get_enabled()) return ZNO_SUCH_TABLE; - if(dropping) + if(get_dropping()) return ZDROP_TABLE_IN_PROGRESS; if(table_version_major(schemaVersion) != table_version_major(currentSchemaVersion)) return ZWRONG_SCHEMA_VERSION_ERROR; @@ -13328,7 +13541,6 @@ Dbtc::execROUTE_ORD(Signal* signal) Uint32 dstRef = ord->dstRef; Uint32 srcRef = ord->srcRef; Uint32 gsn = ord->gsn; - Uint32 cnt = ord->cnt; if (likely(getNodeInfo(refToNode(dstRef)).m_connected)) { diff --git a/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp b/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp index 08332a2e1a1..3db91c55849 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp +++ b/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp @@ -32,6 +32,82 @@ #include <../pgman.hpp> #include <../tsman.hpp> +// jams +#undef jam +#undef jamEntry +#ifdef DBTUP_BUFFER_CPP +#define jam() jamLine(10000 + __LINE__) +#define jamEntry() jamEntryLine(10000 + __LINE__) +#endif +#ifdef DBTUP_ROUTINES_CPP +#define jam() jamLine(15000 + __LINE__) +#define jamEntry() jamEntryLine(15000 + __LINE__) +#endif +#ifdef DBTUP_COMMIT_CPP +#define jam() jamLine(20000 + __LINE__) +#define jamEntry() jamEntryLine(20000 + __LINE__) +#endif +#ifdef DBTUP_FIXALLOC_CPP +#define jam() jamLine(25000 + __LINE__) +#define jamEntry() jamEntryLine(25000 + __LINE__) +#endif +#ifdef DBTUP_TRIGGER_CPP +#define jam() jamLine(30000 + __LINE__) +#define jamEntry() jamEntryLine(30000 + __LINE__) +#endif +#ifdef DBTUP_ABORT_CPP +#define jam() jamLine(35000 + __LINE__) +#define jamEntry() jamEntryLine(35000 + __LINE__) +#endif +#ifdef DBTUP_PAGE_MAP_CPP +#define jam() jamLine(40000 + __LINE__) +#define jamEntry() jamEntryLine(40000 + __LINE__) +#endif +#ifdef DBTUP_PAG_MAN_CPP +#define jam() jamLine(45000 + __LINE__) +#define jamEntry() jamEntryLine(45000 + __LINE__) +#endif +#ifdef DBTUP_STORE_PROC_DEF_CPP +#define jam() jamLine(50000 + __LINE__) +#define jamEntry() jamEntryLine(50000 + __LINE__) +#endif +#ifdef DBTUP_META_CPP +#define jam() jamLine(55000 + __LINE__) +#define jamEntry() jamEntryLine(55000 
+ __LINE__) +#endif +#ifdef DBTUP_TAB_DES_MAN_CPP +#define jam() jamLine(60000 + __LINE__) +#define jamEntry() jamEntryLine(60000 + __LINE__) +#endif +#ifdef DBTUP_GEN_CPP +#define jam() jamLine(65000 + __LINE__) +#define jamEntry() jamEntryLine(65000 + __LINE__) +#endif +#ifdef DBTUP_INDEX_CPP +#define jam() jamLine(70000 + __LINE__) +#define jamEntry() jamEntryLine(70000 + __LINE__) +#endif +#ifdef DBTUP_DEBUG_CPP +#define jam() jamLine(75000 + __LINE__) +#define jamEntry() jamEntryLine(75000 + __LINE__) +#endif +#ifdef DBTUP_VAR_ALLOC_CPP +#define jam() jamLine(80000 + __LINE__) +#define jamEntry() jamEntryLine(80000 + __LINE__) +#endif +#ifdef DBTUP_SCAN_CPP +#define jam() jamLine(85000 + __LINE__) +#define jamEntry() jamEntryLine(85000 + __LINE__) +#endif +#ifdef DBTUP_DISK_ALLOC_CPP +#define jam() jamLine(90000 + __LINE__) +#define jamEntry() jamEntryLine(90000 + __LINE__) +#endif +#ifndef jam +#define jam() jamLine(__LINE__) +#define jamEntry() jamEntryLine(__LINE__) +#endif + #ifdef VM_TRACE inline const char* dbgmask(const Bitmask<MAXNROFATTRIBUTESINWORDS>& bm) { static int i=0; static char buf[5][200]; @@ -70,22 +146,23 @@ inline const Uint32* ALIGN_WORD(const void* ptr) // only reports the line number in the file it currently is located in. // // DbtupExecQuery.cpp 0 -// DbtupBuffer.cpp 2000 -// DbtupRoutines.cpp 3000 -// DbtupCommit.cpp 5000 -// DbtupFixAlloc.cpp 6000 -// DbtupTrigger.cpp 7000 -// DbtupAbort.cpp 9000 -// DbtupPageMap.cpp 14000 -// DbtupPagMan.cpp 16000 -// DbtupStoredProcDef.cpp 18000 -// DbtupMeta.cpp 20000 -// DbtupTabDesMan.cpp 22000 -// DbtupGen.cpp 24000 -// DbtupIndex.cpp 28000 -// DbtupDebug.cpp 30000 -// DbtupVarAlloc.cpp 32000 -// DbtupScan.cpp 33000 +// DbtupBuffer.cpp 10000 +// DbtupRoutines.cpp 15000 +// DbtupCommit.cpp 20000 +// DbtupFixAlloc.cpp 25000 +// DbtupTrigger.cpp 30000 +// DbtupAbort.cpp 35000 +// DbtupPageMap.cpp 40000 +// DbtupPagMan.cpp 45000 +// DbtupStoredProcDef.cpp 50000 +// DbtupMeta.cpp 55000 +// DbtupTabDesMan.cpp 60000 +// DbtupGen.cpp 65000 +// DbtupIndex.cpp 70000 +// DbtupDebug.cpp 75000 +// DbtupVarAlloc.cpp 80000 +// DbtupScan.cpp 85000 +// DbtupDiskAlloc.cpp 90000 //------------------------------------------------------------------ /* @@ -516,6 +593,7 @@ typedef Ptr<Fragoperrec> FragoperrecPtr; return (m_key.m_file_no << 16) ^ m_key.m_page_idx; } + Extent_info() {} bool equal(const Extent_info & rec) const { return m_key.m_file_no == rec.m_key.m_file_no && m_key.m_page_idx == rec.m_key.m_page_idx; @@ -670,6 +748,7 @@ struct Operationrec { Uint32 currentAttrinbufLen; //Used until copyAttrinfo }; + Operationrec() {} bool is_first_operation() const { return prevActiveOp == RNIL;} bool is_last_operation() const { return nextActiveOp == RNIL;} @@ -677,6 +756,7 @@ struct Operationrec { union { Uint32 firstAttrinbufrec; //Used until copyAttrinfo }; + Uint32 m_any_value; union { Uint32 lastAttrinbufrec; //Used until copyAttrinfo Uint32 nextPool; @@ -791,6 +871,7 @@ typedef Ptr<PageRange> PageRangePtr; /* WHEN THE TRIGGER IS DEACTIVATED. 
*/ /* **************************************** */ struct TupTriggerData { + TupTriggerData() {} /** * Trigger id, used by DICT/TRIX to identify the trigger @@ -1276,6 +1357,7 @@ typedef Ptr<HostBuffer> HostBufferPtr; STATIC_CONST( LCP_KEEP = 0x02000000 ); // Should be returned in LCP STATIC_CONST( FREE = 0x02800000 ); // Is free + Tuple_header() {} Uint32 get_tuple_version() const { return m_header_bits & TUP_VERSION_MASK; } @@ -1519,7 +1601,6 @@ private: void execTUP_ABORTREQ(Signal* signal); void execNDB_STTOR(Signal* signal); void execREAD_CONFIG_REQ(Signal* signal); - void execSET_VAR_REQ(Signal* signal); void execDROP_TAB_REQ(Signal* signal); void execALTER_TAB_REQ(Signal* signal); void execTUP_DEALLOCREQ(Signal* signal); @@ -1786,7 +1867,8 @@ private: Operationrec* regOperPtr, Fragrecord* regFragPtr, Tablerec* regTabPtr, - KeyReqStruct* req_struct); + KeyReqStruct* req_struct, + bool disk); //------------------------------------------------------------------ //------------------------------------------------------------------ diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp index b03a07cd6e9..59adfbfde89 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp @@ -14,21 +14,19 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #define DBTUP_C +#define DBTUP_ABORT_CPP #include "Dbtup.hpp" #include <RefConvert.hpp> #include <ndb_limits.h> #include <pc.hpp> -#define ljam() { jamLine(9000 + __LINE__); } -#define ljamEntry() { jamEntryLine(9000 + __LINE__); } - void Dbtup::freeAllAttrBuffers(Operationrec* const regOperPtr) { if (regOperPtr->storedProcedureId == RNIL) { - ljam(); + jam(); freeAttrinbufrec(regOperPtr->firstAttrinbufrec); } else { - ljam(); + jam(); StoredProcPtr storedPtr; c_storedProcPool.getPtr(storedPtr, (Uint32)regOperPtr->storedProcedureId); ndbrequire(storedPtr.p->storedCode == ZSCAN_PROCEDURE); @@ -37,6 +35,7 @@ void Dbtup::freeAllAttrBuffers(Operationrec* const regOperPtr) }//if regOperPtr->firstAttrinbufrec = RNIL; regOperPtr->lastAttrinbufrec = RNIL; + regOperPtr->m_any_value = 0; }//Dbtup::freeAllAttrBuffers() void Dbtup::freeAttrinbufrec(Uint32 anAttrBuf) @@ -46,7 +45,7 @@ void Dbtup::freeAttrinbufrec(Uint32 anAttrBuf) Uint32 RnoFree = cnoFreeAttrbufrec; localAttrBufPtr.i = anAttrBuf; while (localAttrBufPtr.i != RNIL) { - ljam(); + jam(); ptrCheckGuard(localAttrBufPtr, cnoOfAttrbufrec, attrbufrec); Ttemp = localAttrBufPtr.p->attrbuf[ZBUF_NEXT]; localAttrBufPtr.p->attrbuf[ZBUF_NEXT] = cfirstfreeAttrbufrec; @@ -62,7 +61,7 @@ void Dbtup::freeAttrinbufrec(Uint32 anAttrBuf) */ void Dbtup::execTUP_ABORTREQ(Signal* signal) { - ljamEntry(); + jamEntry(); do_tup_abortreq(signal, 0); } @@ -80,7 +79,7 @@ void Dbtup::do_tup_abortreq(Signal* signal, Uint32 flags) (trans_state == TRANS_ERROR_WAIT_TUPKEYREQ) || (trans_state == TRANS_IDLE)); if (regOperPtr.p->op_struct.op_type == ZREAD) { - ljam(); + jam(); freeAllAttrBuffers(regOperPtr.p); initOpConnection(regOperPtr.p); return; @@ -94,7 +93,7 @@ void Dbtup::do_tup_abortreq(Signal* signal, Uint32 flags) if (get_tuple_state(regOperPtr.p) == TUPLE_PREPARED) { - ljam(); + jam(); if (!regTabPtr.p->tuxCustomTriggers.isEmpty() && (flags & ZSKIP_TUX_TRIGGERS) == 0) executeTuxAbortTriggers(signal, @@ -105,12 +104,12 @@ void Dbtup::do_tup_abortreq(Signal* signal, Uint32 flags) OperationrecPtr loopOpPtr; loopOpPtr.i = regOperPtr.p->nextActiveOp; while (loopOpPtr.i != RNIL) { - 
ljam(); + jam(); c_operation_pool.getPtr(loopOpPtr); if (get_tuple_state(loopOpPtr.p) != TUPLE_ALREADY_ABORTED && !regTabPtr.p->tuxCustomTriggers.isEmpty() && (flags & ZSKIP_TUX_TRIGGERS) == 0) { - ljam(); + jam(); executeTuxAbortTriggers(signal, loopOpPtr.p, regFragPtr.p, @@ -145,7 +144,7 @@ void Dbtup::do_tup_abortreq(Signal* signal, Uint32 flags) { if(copy_bits & Tuple_header::MM_GROWN) { - ndbout_c("abort grow"); + if (0) ndbout_c("abort grow"); Ptr<Page> vpage; Uint32 idx= regOperPtr.p->m_tuple_location.m_page_idx; Uint32 mm_vars= regTabPtr.p->m_attributes[MM].m_no_of_varsize; @@ -169,7 +168,7 @@ void Dbtup::do_tup_abortreq(Signal* signal, Uint32 flags) } else if(bits & Tuple_header::MM_SHRINK) { - ndbout_c("abort shrink"); + if (0) ndbout_c("abort shrink"); } } else if (regOperPtr.p->is_first_operation() && @@ -212,116 +211,116 @@ int Dbtup::TUPKEY_abort(Signal* signal, int error_type) case 1: //tmupdate_alloc_error: terrorCode= ZMEM_NOMEM_ERROR; - ljam(); + jam(); break; case 15: - ljam(); + jam(); terrorCode = ZREGISTER_INIT_ERROR; break; case 16: - ljam(); + jam(); terrorCode = ZTRY_TO_UPDATE_ERROR; break; case 17: - ljam(); + jam(); terrorCode = ZNO_ILLEGAL_NULL_ATTR; break; case 19: - ljam(); + jam(); terrorCode = ZTRY_TO_UPDATE_ERROR; break; case 20: - ljam(); + jam(); terrorCode = ZREGISTER_INIT_ERROR; break; case 22: - ljam(); + jam(); terrorCode = ZTOTAL_LEN_ERROR; break; case 23: - ljam(); + jam(); terrorCode = ZREGISTER_INIT_ERROR; break; case 24: - ljam(); + jam(); terrorCode = ZREGISTER_INIT_ERROR; break; case 26: - ljam(); + jam(); terrorCode = ZREGISTER_INIT_ERROR; break; case 27: - ljam(); + jam(); terrorCode = ZREGISTER_INIT_ERROR; break; case 28: - ljam(); + jam(); terrorCode = ZREGISTER_INIT_ERROR; break; case 29: - ljam(); + jam(); break; case 30: - ljam(); + jam(); terrorCode = ZCALL_ERROR; break; case 31: - ljam(); + jam(); terrorCode = ZSTACK_OVERFLOW_ERROR; break; case 32: - ljam(); + jam(); terrorCode = ZSTACK_UNDERFLOW_ERROR; break; case 33: - ljam(); + jam(); terrorCode = ZNO_INSTRUCTION_ERROR; break; case 34: - ljam(); + jam(); terrorCode = ZOUTSIDE_OF_PROGRAM_ERROR; break; case 35: - ljam(); + jam(); terrorCode = ZTOO_MANY_INSTRUCTIONS_ERROR; break; case 38: - ljam(); + jam(); terrorCode = ZTEMPORARY_RESOURCE_FAILURE; break; case 39: if (get_trans_state(operPtr.p) == TRANS_TOO_MUCH_AI) { - ljam(); + jam(); terrorCode = ZTOO_MUCH_ATTRINFO_ERROR; } else if (get_trans_state(operPtr.p) == TRANS_ERROR_WAIT_TUPKEYREQ) { - ljam(); + jam(); terrorCode = ZSEIZE_ATTRINBUFREC_ERROR; } else { ndbrequire(false); }//if break; case 40: - ljam(); + jam(); terrorCode = ZUNSUPPORTED_BRANCH; break; default: diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupBuffer.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupBuffer.cpp index 60c0c22ae6c..adac4d3d460 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupBuffer.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupBuffer.cpp @@ -14,28 +14,26 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #define DBTUP_C +#define DBTUP_BUFFER_CPP #include "Dbtup.hpp" #include <RefConvert.hpp> #include <ndb_limits.h> #include <pc.hpp> #include <signaldata/TransIdAI.hpp> -#define ljam() { jamLine(2000 + __LINE__); } -#define ljamEntry() { jamEntryLine(2000 + __LINE__); } - void Dbtup::execSEND_PACKED(Signal* signal) { Uint16 hostId; Uint32 i; Uint32 TpackedListIndex= cpackedListIndex; - ljamEntry(); + jamEntry(); for (i= 0; i < TpackedListIndex; i++) { - ljam(); + jam(); hostId= cpackedList[i]; ndbrequire((hostId 
- 1) < (MAX_NODES - 1)); // Also check not zero Uint32 TpacketTA= hostBuffer[hostId].noOfPacketsTA; if (TpacketTA != 0) { - ljam(); + jam(); BlockReference TBref= numberToRef(API_PACKED, hostId); Uint32 TpacketLen= hostBuffer[hostId].packetLenTA; MEMCOPY_NO_WORDS(&signal->theData[0], @@ -73,7 +71,7 @@ void Dbtup::bufferTRANSID_AI(Signal* signal, BlockReference aRef, // There is still space in the buffer. We will copy it into the // buffer. // ---------------------------------------------------------------- - ljam(); + jam(); updatePackedList(signal, hostId); } else if (false && TnoOfPackets == 1) { // ---------------------------------------------------------------- @@ -118,7 +116,7 @@ void Dbtup::updatePackedList(Signal* signal, Uint16 hostId) { if (hostBuffer[hostId].inPackedList == false) { Uint32 TpackedListIndex= cpackedListIndex; - ljam(); + jam(); hostBuffer[hostId].inPackedList= true; cpackedList[TpackedListIndex]= hostId; cpackedListIndex= TpackedListIndex + 1; @@ -149,7 +147,7 @@ void Dbtup::sendReadAttrinfo(Signal* signal, if (ERROR_INSERTED(4006) && (nodeId != getOwnNodeId())){ // Use error insert to turn routing on - ljam(); + jam(); connectedToNode= false; } @@ -167,18 +165,18 @@ void Dbtup::sendReadAttrinfo(Signal* signal, * Own node -> execute direct */ if(nodeId != getOwnNodeId()){ - ljam(); + jam(); /** * Send long sig */ if (ToutBufIndex >= 22 && is_api && !old_dest) { - ljam(); + jam(); /** * Flush buffer so that order is maintained */ if (TpacketTA != 0) { - ljam(); + jam(); BlockReference TBref = numberToRef(API_PACKED, nodeId); MEMCOPY_NO_WORDS(&signal->theData[0], &hostBuffer[nodeId].packetBufferTA[0], @@ -202,7 +200,7 @@ void Dbtup::sendReadAttrinfo(Signal* signal, */ #ifndef NDB_NO_DROPPED_SIGNAL if (ToutBufIndex < 22 && is_api){ - ljam(); + jam(); bufferTRANSID_AI(signal, recBlockref, 3+ToutBufIndex); return; } @@ -214,7 +212,7 @@ void Dbtup::sendReadAttrinfo(Signal* signal, Uint32 * src= signal->theData+25; if (ToutBufIndex >= 22){ do { - ljam(); + jam(); MEMCOPY_NO_WORDS(&signal->theData[3], src, 22); sendSignal(recBlockref, GSN_TRANSID_AI, signal, 25, JBB); ToutBufIndex -= 22; @@ -223,14 +221,14 @@ void Dbtup::sendReadAttrinfo(Signal* signal, } if (ToutBufIndex > 0){ - ljam(); + jam(); MEMCOPY_NO_WORDS(&signal->theData[3], src, ToutBufIndex); sendSignal(recBlockref, GSN_TRANSID_AI, signal, 3+ToutBufIndex, JBB); } return; } EXECUTE_DIRECT(block, GSN_TRANSID_AI, signal, 3 + ToutBufIndex); - ljamEntry(); + jamEntry(); return; } @@ -242,7 +240,7 @@ void Dbtup::sendReadAttrinfo(Signal* signal, Uint32 routeBlockref= req_struct->TC_ref; if (true){ // TODO is_api && !old_dest){ - ljam(); + jam(); transIdAI->attrData[0]= recBlockref; LinearSectionPtr ptr[3]; ptr[0].p= &signal->theData[25]; @@ -260,7 +258,7 @@ void Dbtup::sendReadAttrinfo(Signal* signal, Uint32 sent= 0; Uint32 maxLen= TransIdAI::DataLength - 1; while (sent < tot) { - ljam(); + jam(); Uint32 dataLen= (tot - sent > maxLen) ? 
maxLen : tot - sent; Uint32 sigLen= dataLen + TransIdAI::HeaderLength + 1; MEMCOPY_NO_WORDS(&transIdAI->attrData, diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp index dc0be538807..812f071e037 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp @@ -14,6 +14,7 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #define DBTUP_C +#define DBTUP_COMMIT_CPP #include "Dbtup.hpp" #include <RefConvert.hpp> #include <ndb_limits.h> @@ -21,16 +22,13 @@ #include <signaldata/TupCommit.hpp> #include "../dblqh/Dblqh.hpp" -#define ljam() { jamLine(5000 + __LINE__); } -#define ljamEntry() { jamEntryLine(5000 + __LINE__); } - void Dbtup::execTUP_DEALLOCREQ(Signal* signal) { TablerecPtr regTabPtr; FragrecordPtr regFragPtr; Uint32 frag_page_id, frag_id; - ljamEntry(); + jamEntry(); frag_id= signal->theData[0]; regTabPtr.i= signal->theData[1]; @@ -62,7 +60,7 @@ void Dbtup::execTUP_DEALLOCREQ(Signal* signal) if (regTabPtr.p->m_attributes[MM].m_no_of_varsize) { - ljam(); + jam(); free_var_rec(regFragPtr.p, regTabPtr.p, &tmp, pagePtr); } else { free_fix_rec(regFragPtr.p, regTabPtr.p, &tmp, (Fix_page*)pagePtr.p); @@ -78,7 +76,7 @@ void Dbtup::execTUP_WRITELOG_REQ(Signal* signal) Uint32 gci= signal->theData[1]; c_operation_pool.getPtr(loopOpPtr); while (loopOpPtr.p->prevActiveOp != RNIL) { - ljam(); + jam(); loopOpPtr.i= loopOpPtr.p->prevActiveOp; c_operation_pool.getPtr(loopOpPtr); } @@ -87,11 +85,11 @@ void Dbtup::execTUP_WRITELOG_REQ(Signal* signal) signal->theData[0]= loopOpPtr.p->userpointer; signal->theData[1]= gci; if (loopOpPtr.p->nextActiveOp == RNIL) { - ljam(); + jam(); EXECUTE_DIRECT(DBLQH, GSN_LQH_WRITELOG_REQ, signal, 2); return; } - ljam(); + jam(); EXECUTE_DIRECT(DBLQH, GSN_LQH_WRITELOG_REQ, signal, 2); jamEntry(); loopOpPtr.i= loopOpPtr.p->nextActiveOp; @@ -113,16 +111,16 @@ void Dbtup::removeActiveOpList(Operationrec* const regOperPtr, if (regOperPtr->op_struct.in_active_list) { regOperPtr->op_struct.in_active_list= false; if (regOperPtr->nextActiveOp != RNIL) { - ljam(); + jam(); raoOperPtr.i= regOperPtr->nextActiveOp; c_operation_pool.getPtr(raoOperPtr); raoOperPtr.p->prevActiveOp= regOperPtr->prevActiveOp; } else { - ljam(); + jam(); tuple_ptr->m_operation_ptr_i = regOperPtr->prevActiveOp; } if (regOperPtr->prevActiveOp != RNIL) { - ljam(); + jam(); raoOperPtr.i= regOperPtr->prevActiveOp; c_operation_pool.getPtr(raoOperPtr); raoOperPtr.p->nextActiveOp= regOperPtr->nextActiveOp; @@ -349,7 +347,7 @@ Dbtup::disk_page_commit_callback(Signal* signal, Uint32 gci; OperationrecPtr regOperPtr; - ljamEntry(); + jamEntry(); c_operation_pool.getPtr(regOperPtr, opPtrI); c_lqh->get_op_info(regOperPtr.p->userpointer, &hash_value, &gci); @@ -386,7 +384,7 @@ Dbtup::disk_page_log_buffer_callback(Signal* signal, Uint32 gci; OperationrecPtr regOperPtr; - ljamEntry(); + jamEntry(); c_operation_pool.getPtr(regOperPtr, opPtrI); c_lqh->get_op_info(regOperPtr.p->userpointer, &hash_value, &gci); @@ -455,7 +453,7 @@ void Dbtup::execTUP_COMMITREQ(Signal* signal) TupCommitReq * const tupCommitReq= (TupCommitReq *)signal->getDataPtr(); regOperPtr.i= tupCommitReq->opPtr; - ljamEntry(); + jamEntry(); c_operation_pool.getPtr(regOperPtr); if(!regOperPtr.p->is_first_operation()) @@ -531,7 +529,7 @@ void Dbtup::execTUP_COMMITREQ(Signal* signal) c_lgman->free_log_space(regFragPtr.p->m_logfile_group_id, regOperPtr.p->m_undo_buffer_space); - ndbout_c("insert+delete"); 
+ if (0) ndbout_c("insert+delete"); goto skip_disk; } } @@ -620,7 +618,7 @@ skip_disk: * why can't we instead remove "own version" (when approriate ofcourse) */ if (!regTabPtr.p->tuxCustomTriggers.isEmpty()) { - ljam(); + jam(); OperationrecPtr loopPtr= regOperPtr; while(loopPtr.i != RNIL) { @@ -675,18 +673,18 @@ Dbtup::set_change_mask_info(KeyReqStruct * const req_struct, { ChangeMaskState state = get_change_mask_state(regOperPtr); if (state == USE_SAVED_CHANGE_MASK) { - ljam(); + jam(); req_struct->changeMask.setWord(0, regOperPtr->saved_change_mask[0]); req_struct->changeMask.setWord(1, regOperPtr->saved_change_mask[1]); } else if (state == RECALCULATE_CHANGE_MASK) { - ljam(); + jam(); // Recompute change mask, for now set all bits req_struct->changeMask.set(); } else if (state == SET_ALL_MASK) { - ljam(); + jam(); req_struct->changeMask.set(); } else { - ljam(); + jam(); ndbrequire(state == DELETE_CHANGES); req_struct->changeMask.set(); } @@ -706,17 +704,17 @@ Dbtup::calculateChangeMask(Page* const pagePtr, ndbrequire(loopOpPtr.p->op_struct.op_type == ZUPDATE); ChangeMaskState change_mask= get_change_mask_state(loopOpPtr.p); if (change_mask == USE_SAVED_CHANGE_MASK) { - ljam(); + jam(); saved_word1|= loopOpPtr.p->saved_change_mask[0]; saved_word2|= loopOpPtr.p->saved_change_mask[1]; } else if (change_mask == RECALCULATE_CHANGE_MASK) { - ljam(); + jam(); //Recompute change mask, for now set all bits req_struct->changeMask.set(); return; } else { ndbrequire(change_mask == SET_ALL_MASK); - ljam(); + jam(); req_struct->changeMask.set(); return; } diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupDebug.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupDebug.cpp index 9b60d5d47ed..8e532ae97b5 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupDebug.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupDebug.cpp @@ -15,6 +15,7 @@ #define DBTUP_C +#define DBTUP_DEBUG_CPP #include "Dbtup.hpp" #include <RefConvert.hpp> #include <ndb_limits.h> @@ -24,9 +25,6 @@ #include <signaldata/EventReport.hpp> #include <Vector.hpp> -#define ljam() { jamLine(30000 + __LINE__); } -#define ljamEntry() { jamEntryLine(30000 + __LINE__); } - /* **************************************************************** */ /* ---------------------------------------------------------------- */ /* ------------------------ DEBUG MODULE -------------------------- */ @@ -35,7 +33,7 @@ void Dbtup::execDEBUG_SIG(Signal* signal) { PagePtr regPagePtr; - ljamEntry(); + jamEntry(); regPagePtr.i = signal->theData[0]; c_page_pool.getPtr(regPagePtr); }//Dbtup::execDEBUG_SIG() @@ -281,18 +279,18 @@ void Dbtup::execMEMCHECKREQ(Signal* signal) PagePtr regPagePtr; Uint32* data = &signal->theData[0]; - ljamEntry(); + jamEntry(); BlockReference blockref = signal->theData[0]; Uint32 i; for (i = 0; i < 25; i++) { - ljam(); + jam(); data[i] = 0; }//for for (i = 0; i < 16; i++) { regPagePtr.i = cfreepageList[i]; - ljam(); + jam(); while (regPagePtr.i != RNIL) { - ljam(); + jam(); ptrCheckGuard(regPagePtr, cnoOfPage, cpage); regPagePtr.i = regPagePtr.p->next_page; data[0]++; diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupDiskAlloc.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupDiskAlloc.cpp index a235e02a4b7..8420e7f2bde 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupDiskAlloc.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupDiskAlloc.cpp @@ -14,6 +14,7 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #define DBTUP_C +#define DBTUP_DISK_ALLOC_CPP #include "Dbtup.hpp" static bool f_undo_done = true; @@ 
-153,12 +154,10 @@ Dbtup::Disk_alloc_info::Disk_alloc_info(const Tablerec* tabPtrP, return; Uint32 min_size= 4*tabPtrP->m_offsets[DD].m_fix_header_size; - Uint32 var_size= tabPtrP->m_offsets[DD].m_max_var_offset; if (tabPtrP->m_attributes[DD].m_no_of_varsize == 0) { Uint32 recs_per_page= (4*Tup_fixsize_page::DATA_WORDS)/min_size; - Uint32 pct_free= 0; m_page_free_bits_map[0] = recs_per_page; // 100% free m_page_free_bits_map[1] = 1; m_page_free_bits_map[2] = 0; @@ -317,7 +316,7 @@ Dbtup::restart_setup_page(Disk_alloc_info& alloc, PagePtr pagePtr) 0, 0, 0); unsigned uncommitted, committed; uncommitted = committed = ~(unsigned)0; - int ret = tsman.get_page_free_bits(&page, &uncommitted, &committed); + (void) tsman.get_page_free_bits(&page, &uncommitted, &committed); jamEntry(); idx = alloc.calc_page_free_bits(real_free); @@ -862,9 +861,6 @@ Dbtup::disk_page_set_dirty(PagePtr pagePtr) if (DBG_DISK) ndbout << " disk_page_set_dirty " << key << endl; - Uint32 tableId = pagePtr.p->m_table_id; - Uint32 fragId = pagePtr.p->m_fragment_id; - Ptr<Tablerec> tabPtr; tabPtr.i= pagePtr.p->m_table_id; ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec); @@ -1030,8 +1026,6 @@ Dbtup::disk_page_alloc(Signal* signal, Disk_alloc_info& alloc= fragPtrP->m_disk_alloc_info; Uint64 lsn; - Uint32 old_free = pagePtr.p->free_space; - Uint32 old_bits= alloc.calc_page_free_bits(old_free); if (tabPtrP->m_attributes[DD].m_no_of_varsize == 0) { ddassert(pagePtr.p->uncommitted_used_space > 0); @@ -1063,7 +1057,6 @@ Dbtup::disk_page_free(Signal *signal, Uint32 logfile_group_id= fragPtrP->m_logfile_group_id; Disk_alloc_info& alloc= fragPtrP->m_disk_alloc_info; Uint32 old_free= pagePtr.p->free_space; - Uint32 old_bits= alloc.calc_page_free_bits(old_free); Uint32 sz; Uint64 lsn; @@ -1090,7 +1083,6 @@ Dbtup::disk_page_free(Signal *signal, } Uint32 new_free = pagePtr.p->free_space; - Uint32 new_bits = alloc.calc_page_free_bits(new_free); Uint32 ext = pagePtr.p->m_extent_info_ptr; Uint32 used = pagePtr.p->uncommitted_used_space; diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp index bba894db7fd..45766e5e9c4 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp @@ -101,6 +101,7 @@ void Dbtup::copyAttrinfo(Operationrec * regOperPtr, regOperPtr->storedProcedureId= RNIL; regOperPtr->firstAttrinbufrec= RNIL; regOperPtr->lastAttrinbufrec= RNIL; + regOperPtr->m_any_value= 0; } void Dbtup::handleATTRINFOforTUPKEYREQ(Signal* signal, @@ -283,6 +284,8 @@ Dbtup::insertActiveOpList(OperationrecPtr regOperPtr, regOperPtr.p->saved_change_mask[0] = prevOpPtr.p->saved_change_mask[0]; regOperPtr.p->saved_change_mask[1] = prevOpPtr.p->saved_change_mask[1]; + regOperPtr.p->m_any_value = prevOpPtr.p->m_any_value; + prevOpPtr.p->op_struct.m_wait_log_buffer= 0; prevOpPtr.p->op_struct.m_load_diskpage_on_commit= 0; @@ -447,7 +450,6 @@ Dbtup::load_diskpage(Signal* signal, Tuple_header* ptr= (Tuple_header*)tmp; int res= 1; - Uint32 opPtr= ptr->m_operation_ptr_i; if(ptr->m_header_bits & Tuple_header::DISK_PART) { Page_cache_client::Request req; @@ -532,7 +534,6 @@ Dbtup::load_diskpage_scan(Signal* signal, Tuple_header* ptr= (Tuple_header*)tmp; int res= 1; - Uint32 opPtr= ptr->m_operation_ptr_i; if(ptr->m_header_bits & Tuple_header::DISK_PART) { Page_cache_client::Request req; @@ -809,8 +810,11 @@ void Dbtup::execTUPKEYREQ(Signal* signal) else if(Roptype == ZDELETE) { jam(); + req_struct.log_size= 0; if 
(handleDeleteReq(signal, regOperPtr, - regFragPtr, regTabPtr, &req_struct) == -1) { + regFragPtr, regTabPtr, + &req_struct, + disk_page != RNIL) == -1) { return; } /* @@ -825,7 +829,6 @@ void Dbtup::execTUPKEYREQ(Signal* signal) regOperPtr, regTabPtr); set_change_mask_state(regOperPtr, DELETE_CHANGES); - req_struct.log_size= 0; sendTUPKEYCONF(signal, &req_struct, regOperPtr); return; } @@ -1481,7 +1484,8 @@ int Dbtup::handleDeleteReq(Signal* signal, Operationrec* regOperPtr, Fragrecord* regFragPtr, Tablerec* regTabPtr, - KeyReqStruct *req_struct) + KeyReqStruct *req_struct, + bool disk) { // delete must set but not increment tupVersion if (!regOperPtr->is_first_operation()) @@ -1497,6 +1501,7 @@ int Dbtup::handleDeleteReq(Signal* signal, goto error; } memcpy(dst, org, regTabPtr->total_rec_size << 2); + req_struct->m_tuple_ptr = (Tuple_header*)dst; } else { @@ -1533,8 +1538,20 @@ int Dbtup::handleDeleteReq(Signal* signal, { return 0; } + + if (regTabPtr->need_expand(disk)) + prepare_read(req_struct, regTabPtr, disk); - return handleReadReq(signal, regOperPtr, regTabPtr, req_struct); + { + Uint32 RlogSize; + int ret= handleReadReq(signal, regOperPtr, regTabPtr, req_struct); + if (ret == 0 && (RlogSize= req_struct->log_size)) + { + jam(); + sendLogAttrinfo(signal, RlogSize, regOperPtr); + } + return ret; + } error: tupkeyErrorLab(signal); @@ -1838,7 +1855,7 @@ int Dbtup::interpreterNextLab(Signal* signal, Uint32 RstackPtr= 0; union { Uint32 TregMemBuffer[32]; - Uint64 Tdummy[16]; + Uint64 align[16]; }; Uint32 TstackMemBuffer[32]; @@ -2835,13 +2852,19 @@ Dbtup::handle_size_change_after_update(KeyReqStruct* req_struct, if(needed <= alloc) { //ndbassert(!regOperPtr->is_first_operation()); - ndbout_c(" no grow"); + if (0) ndbout_c(" no grow"); return 0; } copy_bits |= Tuple_header::MM_GROWN; if (unlikely(realloc_var_part(regFragPtr, regTabPtr, pagePtr, refptr, alloc, needed))) return -1; + + if (regTabPtr->m_bits & Tablerec::TR_Checksum) + { + jam(); + setChecksum(org, regTabPtr); + } } req_struct->m_tuple_ptr->m_header_bits = copy_bits; return 0; @@ -2889,7 +2912,6 @@ Dbtup::nr_read_pk(Uint32 fragPtrI, ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec); Local_key tmp = *key; - Uint32 pages = fragPtr.p->noOfPages; PagePtr page_ptr; @@ -3100,7 +3122,7 @@ Dbtup::nr_delete(Signal* signal, Uint32 senderData, break; } - ndbout << "DIRECT DISK DELETE: " << disk << endl; + if (0) ndbout << "DIRECT DISK DELETE: " << disk << endl; disk_page_free(signal, tablePtr.p, fragPtr.p, &disk, *(PagePtr*)&disk_page, gci); return 0; @@ -3152,7 +3174,7 @@ Dbtup::nr_delete_page_callback(Signal* signal, break; } - ndbout << "PAGE CALLBACK DISK DELETE: " << op.m_disk_ref << endl; + if (0) ndbout << "PAGE CALLBACK DISK DELETE: " << op.m_disk_ref << endl; disk_page_free(signal, tablePtr.p, fragPtr.p, &op.m_disk_ref, pagePtr, op.m_gci); @@ -3184,7 +3206,7 @@ Dbtup::nr_delete_log_buffer_callback(Signal* signal, /** * reset page no */ - ndbout << "LOGBUFFER CALLBACK DISK DELETE: " << op.m_disk_ref << endl; + if (0) ndbout << "LOGBUFFER CALLBACK DISK DELETE: " << op.m_disk_ref << endl; disk_page_free(signal, tablePtr.p, fragPtr.p, &op.m_disk_ref, pagePtr, op.m_gci); diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupFixAlloc.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupFixAlloc.cpp index 1e9436c2306..900a02cfd72 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupFixAlloc.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupFixAlloc.cpp @@ -14,14 +14,12 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 
USA */ #define DBTUP_C +#define DBTUP_FIXALLOC_CPP #include "Dbtup.hpp" #include <RefConvert.hpp> #include <ndb_limits.h> #include <pc.hpp> -#define ljam() { jamLine(6000 + __LINE__); } -#define ljamEntry() { jamEntryLine(6000 + __LINE__); } - // // Fixed Allocator // This module is used to allocate and free fixed size tuples from the @@ -79,7 +77,7 @@ Dbtup::alloc_fix_rec(Fragrecord* const regFragPtr, /* ---------------------------------------------------------------- */ pagePtr.i = getEmptyPage(regFragPtr); if (pagePtr.i != RNIL) { - ljam(); + jam(); /* ---------------------------------------------------------------- */ // We found empty pages on the fragment. Allocate an empty page and // convert it into a tuple header page and put it in thFreeFirst-list. @@ -95,14 +93,14 @@ Dbtup::alloc_fix_rec(Fragrecord* const regFragPtr, LocalDLFifoList<Page> free_pages(c_page_pool, regFragPtr->thFreeFirst); free_pages.addFirst(pagePtr); } else { - ljam(); + jam(); /* ---------------------------------------------------------------- */ /* THERE ARE NO EMPTY PAGES. MEMORY CAN NOT BE ALLOCATED. */ /* ---------------------------------------------------------------- */ return 0; } } else { - ljam(); + jam(); /* ---------------------------------------------------------------- */ /* THIS SHOULD BE THE COMMON PATH THROUGH THE CODE, FREE */ /* COPY PAGE EXISTED. */ @@ -123,7 +121,6 @@ void Dbtup::convertThPage(Fix_page* regPagePtr, Uint32 mm) { Uint32 nextTuple = regTabPtr->m_offsets[mm].m_fix_header_size; - Uint32 endOfList; /* ASSUMES AT LEAST ONE TUPLE HEADER FITS AND THEREFORE NO HANDLING OF ZERO AS EXTREME CASE @@ -194,7 +191,7 @@ void Dbtup::free_fix_rec(Fragrecord* regFragPtr, if(free == 1) { - ljam(); + jam(); PagePtr pagePtr = { (Page*)regPagePtr, key->m_page_no }; LocalDLFifoList<Page> free_pages(c_page_pool, regFragPtr->thFreeFirst); ndbrequire(regPagePtr->page_state == ZTH_MM_FULL); diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp index 3ea3deda04f..74c7d38bd64 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp @@ -15,6 +15,7 @@ #define DBTUP_C +#define DBTUP_GEN_CPP #include "Dbtup.hpp" #include <RefConvert.hpp> #include <ndb_limits.h> @@ -34,9 +35,6 @@ #define DEBUG(x) { ndbout << "TUP::" << x << endl; } -#define ljam() { jamLine(24000 + __LINE__); } -#define ljamEntry() { jamEntryLine(24000 + __LINE__); } - void Dbtup::initData() { cnoOfAttrbufrec = ZNO_OF_ATTRBUFREC; @@ -80,7 +78,6 @@ Dbtup::Dbtup(Block_context& ctx, Pgman* pgman) addRecSignal(GSN_TUP_ABORTREQ, &Dbtup::execTUP_ABORTREQ); addRecSignal(GSN_NDB_STTOR, &Dbtup::execNDB_STTOR); addRecSignal(GSN_READ_CONFIG_REQ, &Dbtup::execREAD_CONFIG_REQ, true); - addRecSignal(GSN_SET_VAR_REQ, &Dbtup::execSET_VAR_REQ); // Trigger Signals addRecSignal(GSN_CREATE_TRIG_REQ, &Dbtup::execCREATE_TRIG_REQ); @@ -157,21 +154,21 @@ BLOCK_FUNCTIONS(Dbtup) void Dbtup::execCONTINUEB(Signal* signal) { - ljamEntry(); + jamEntry(); Uint32 actionType = signal->theData[0]; Uint32 dataPtr = signal->theData[1]; switch (actionType) { case ZINITIALISE_RECORDS: - ljam(); + jam(); initialiseRecordsLab(signal, dataPtr, signal->theData[2], signal->theData[3]); break; case ZREL_FRAG: - ljam(); + jam(); releaseFragment(signal, dataPtr, signal->theData[2]); break; case ZREPORT_MEMORY_USAGE:{ - ljam(); + jam(); static int c_currentMemUsed = 0; Uint32 cnt = signal->theData[1]; Uint32 tmp = c_page_pool.getSize(); @@ -206,11 +203,11 @@ void 
Dbtup::execCONTINUEB(Signal* signal) return; } case ZBUILD_INDEX: - ljam(); + jam(); buildIndex(signal, dataPtr); break; case ZTUP_SCAN: - ljam(); + jam(); { ScanOpPtr scanPtr; c_scanOpPool.getPtr(scanPtr, dataPtr); @@ -219,7 +216,7 @@ void Dbtup::execCONTINUEB(Signal* signal) return; case ZFREE_EXTENT: { - ljam(); + jam(); TablerecPtr tabPtr; tabPtr.i= dataPtr; @@ -232,7 +229,7 @@ void Dbtup::execCONTINUEB(Signal* signal) } case ZUNMAP_PAGES: { - ljam(); + jam(); TablerecPtr tabPtr; tabPtr.i= dataPtr; @@ -245,7 +242,7 @@ void Dbtup::execCONTINUEB(Signal* signal) } case ZFREE_VAR_PAGES: { - ljam(); + jam(); drop_fragment_free_var_pages(signal); return; } @@ -262,19 +259,19 @@ void Dbtup::execCONTINUEB(Signal* signal) /* **************************************************************** */ void Dbtup::execSTTOR(Signal* signal) { - ljamEntry(); + jamEntry(); Uint32 startPhase = signal->theData[1]; Uint32 sigKey = signal->theData[6]; switch (startPhase) { case ZSTARTPHASE1: - ljam(); + jam(); ndbrequire((c_lqh= (Dblqh*)globalData.getBlock(DBLQH)) != 0); ndbrequire((c_tsman= (Tsman*)globalData.getBlock(TSMAN)) != 0); ndbrequire((c_lgman= (Lgman*)globalData.getBlock(LGMAN)) != 0); cownref = calcTupBlockRef(0); break; default: - ljam(); + jam(); break; }//switch signal->theData[0] = sigKey; @@ -297,7 +294,7 @@ void Dbtup::execREAD_CONFIG_REQ(Signal* signal) Uint32 senderData = req->senderData; ndbrequire(req->noOfParameters == 0); - ljamEntry(); + jamEntry(); const ndb_mgm_configuration_iterator * p = m_ctx.m_config.getOwnConfigIterator(); @@ -435,58 +432,58 @@ void Dbtup::initialiseRecordsLab(Signal* signal, Uint32 switchData, { switch (switchData) { case 0: - ljam(); + jam(); initializeHostBuffer(); break; case 1: - ljam(); + jam(); initializeOperationrec(); break; case 2: - ljam(); + jam(); initializePage(); break; case 3: - ljam(); + jam(); break; case 4: - ljam(); + jam(); initializeTablerec(); break; case 5: - ljam(); + jam(); break; case 6: - ljam(); + jam(); initializeFragrecord(); break; case 7: - ljam(); + jam(); initializeFragoperrec(); break; case 8: - ljam(); + jam(); initializePageRange(); break; case 9: - ljam(); + jam(); initializeTabDescr(); break; case 10: - ljam(); + jam(); break; case 11: - ljam(); + jam(); break; case 12: - ljam(); + jam(); initializeAttrbufrec(); break; case 13: - ljam(); + jam(); break; case 14: - ljam(); + jam(); { ReadConfigConf * conf = (ReadConfigConf*)signal->getDataPtrSend(); @@ -510,28 +507,28 @@ void Dbtup::initialiseRecordsLab(Signal* signal, Uint32 switchData, void Dbtup::execNDB_STTOR(Signal* signal) { - ljamEntry(); + jamEntry(); cndbcntrRef = signal->theData[0]; Uint32 ownNodeId = signal->theData[1]; Uint32 startPhase = signal->theData[2]; switch (startPhase) { case ZSTARTPHASE1: - ljam(); + jam(); cownNodeId = ownNodeId; cownref = calcTupBlockRef(ownNodeId); break; case ZSTARTPHASE2: - ljam(); + jam(); break; case ZSTARTPHASE3: - ljam(); + jam(); startphase3Lab(signal, ~0, ~0); break; case ZSTARTPHASE4: - ljam(); + jam(); break; case ZSTARTPHASE6: - ljam(); + jam(); /*****************************************/ /* NOW SET THE DISK WRITE SPEED TO */ /* PAGES PER TICK AFTER SYSTEM */ @@ -542,7 +539,7 @@ void Dbtup::execNDB_STTOR(Signal* signal) sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 1000, 2); break; default: - ljam(); + jam(); break; }//switch signal->theData[0] = cownref; @@ -619,7 +616,7 @@ void Dbtup::initializeTablerec() { TablerecPtr regTabPtr; for (regTabPtr.i = 0; regTabPtr.i < cnoOfTablerec; regTabPtr.i++) { - ljam(); + 
jam(); refresh_watch_dog(); ptrAss(regTabPtr, tablerec); initTab(regTabPtr.p); @@ -690,12 +687,12 @@ void Dbtup::initializeTabDescr() void Dbtup::execTUPSEIZEREQ(Signal* signal) { OperationrecPtr regOperPtr; - ljamEntry(); + jamEntry(); Uint32 userPtr = signal->theData[0]; BlockReference userRef = signal->theData[1]; if (!c_operation_pool.seize(regOperPtr)) { - ljam(); + jam(); signal->theData[0] = userPtr; signal->theData[1] = ZGET_OPREC_ERROR; sendSignal(userRef, GSN_TUPSEIZEREF, signal, 2, JBB); @@ -705,6 +702,7 @@ void Dbtup::execTUPSEIZEREQ(Signal* signal) new (regOperPtr.p) Operationrec(); regOperPtr.p->firstAttrinbufrec = RNIL; regOperPtr.p->lastAttrinbufrec = RNIL; + regOperPtr.p->m_any_value = 0; regOperPtr.p->op_struct.op_type = ZREAD; regOperPtr.p->op_struct.in_active_list = false; set_trans_state(regOperPtr.p, TRANS_DISCONNECTED); @@ -729,7 +727,7 @@ void Dbtup::execTUPSEIZEREQ(Signal* signal) void Dbtup::execTUPRELEASEREQ(Signal* signal) { OperationrecPtr regOperPtr; - ljamEntry(); + jamEntry(); regOperPtr.i = signal->theData[0]; c_operation_pool.getPtr(regOperPtr); set_trans_state(regOperPtr.p, TRANS_DISCONNECTED); @@ -746,32 +744,5 @@ void Dbtup::releaseFragrec(FragrecordPtr regFragPtr) cfirstfreefrag = regFragPtr.i; }//Dbtup::releaseFragrec() -void Dbtup::execSET_VAR_REQ(Signal* signal) -{ -#if 0 - SetVarReq* const setVarReq = (SetVarReq*)signal->getDataPtrSend(); - ConfigParamId var = setVarReq->variable(); - int val = setVarReq->value(); - - switch (var) { - - case NoOfDiskPagesToDiskAfterRestartTUP: - clblPagesPerTick = val; - sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB); - break; - - case NoOfDiskPagesToDiskDuringRestartTUP: - // Valid only during start so value not set. - sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB); - break; - - default: - sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB); - } // switch -#endif - -}//execSET_VAR_REQ() - - diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp index 8a65e7f7864..0427f1c7612 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp @@ -14,6 +14,7 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #define DBTUP_C +#define DBTUP_INDEX_CPP #include "Dbtup.hpp" #include <RefConvert.hpp> #include <ndb_limits.h> @@ -23,9 +24,6 @@ #include <AttributeHeader.hpp> #include <signaldata/TuxMaint.hpp> -#define ljam() { jamLine(28000 + __LINE__); } -#define ljamEntry() { jamEntryLine(28000 + __LINE__); } - // methods used by ordered index void @@ -34,7 +32,7 @@ Dbtup::tuxGetTupAddr(Uint32 fragPtrI, Uint32 pageIndex, Uint32& tupAddr) { - ljamEntry(); + jamEntry(); PagePtr pagePtr; c_page_pool.getPtr(pagePtr, pageId); Uint32 fragPageId= pagePtr.p->frag_page_id; @@ -48,7 +46,7 @@ Dbtup::tuxAllocNode(Signal* signal, Uint32& pageOffset, Uint32*& node) { - ljamEntry(); + jamEntry(); FragrecordPtr fragPtr; fragPtr.i= fragPtrI; ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord); @@ -61,7 +59,7 @@ Dbtup::tuxAllocNode(Signal* signal, Uint32* ptr, frag_page_id; if ((ptr= alloc_fix_rec(fragPtr.p, tablePtr.p, &key, &frag_page_id)) == 0) { - ljam(); + jam(); terrorCode = ZMEM_NOMEM_ERROR; // caller sets error return terrorCode; } @@ -82,7 +80,7 @@ Dbtup::tuxFreeNode(Signal* signal, Uint32 pageOffset, Uint32* node) { - ljamEntry(); + jamEntry(); FragrecordPtr fragPtr; fragPtr.i= fragPtrI; ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord); @@ -105,7 +103,7 @@ 
Dbtup::tuxGetNode(Uint32 fragPtrI, Uint32 pageOffset, Uint32*& node) { - ljamEntry(); + jamEntry(); FragrecordPtr fragPtr; fragPtr.i= fragPtrI; ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord); @@ -130,7 +128,7 @@ Dbtup::tuxReadAttrs(Uint32 fragPtrI, Uint32 numAttrs, Uint32* dataOut) { - ljamEntry(); + jamEntry(); // use own variables instead of globals FragrecordPtr fragPtr; fragPtr.i= fragPtrI; @@ -150,21 +148,21 @@ Dbtup::tuxReadAttrs(Uint32 fragPtrI, Tuple_header *tuple_ptr= req_struct.m_tuple_ptr; if (tuple_ptr->get_tuple_version() != tupVersion) { - ljam(); + jam(); OperationrecPtr opPtr; opPtr.i= tuple_ptr->m_operation_ptr_i; Uint32 loopGuard= 0; while (opPtr.i != RNIL) { c_operation_pool.getPtr(opPtr); if (opPtr.p->tupVersion == tupVersion) { - ljam(); + jam(); if (!opPtr.p->m_copy_tuple_location.isNull()) { req_struct.m_tuple_ptr= (Tuple_header*) c_undo_buffer.get_ptr(&opPtr.p->m_copy_tuple_location); } break; } - ljam(); + jam(); opPtr.i= opPtr.p->prevActiveOp; ndbrequire(++loopGuard < (1 << ZTUP_VERSION_BITS)); } @@ -202,7 +200,7 @@ Dbtup::tuxReadAttrs(Uint32 fragPtrI, int Dbtup::tuxReadPk(Uint32 fragPtrI, Uint32 pageId, Uint32 pageIndex, Uint32* dataOut, bool xfrmFlag) { - ljamEntry(); + jamEntry(); // use own variables instead of globals FragrecordPtr fragPtr; fragPtr.i= fragPtrI; @@ -305,7 +303,7 @@ Dbtup::tuxReadPk(Uint32 fragPtrI, Uint32 pageId, Uint32 pageIndex, Uint32* dataO int Dbtup::accReadPk(Uint32 tableId, Uint32 fragId, Uint32 fragPageId, Uint32 pageIndex, Uint32* dataOut, bool xfrmFlag) { - ljamEntry(); + jamEntry(); // get table TablerecPtr tablePtr; tablePtr.i = tableId; @@ -329,7 +327,7 @@ Dbtup::tuxQueryTh(Uint32 fragPtrI, Uint32 transId2, Uint32 savePointId) { - ljamEntry(); + jamEntry(); FragrecordPtr fragPtr; fragPtr.i= fragPtrI; ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord); @@ -358,9 +356,9 @@ Dbtup::tuxQueryTh(Uint32 fragPtrI, * for this transaction and savepoint id. If its tuple version * equals the requested then we have a visible tuple otherwise not. */ - ljam(); + jam(); if (req_struct.m_tuple_ptr->get_tuple_version() == tupVersion) { - ljam(); + jam(); return true; } } @@ -378,7 +376,7 @@ Dbtup::tuxQueryTh(Uint32 fragPtrI, void Dbtup::execBUILDINDXREQ(Signal* signal) { - ljamEntry(); + jamEntry(); #ifdef TIME_MEASUREMENT time_events= 0; tot_time_passed= 0; @@ -387,7 +385,7 @@ Dbtup::execBUILDINDXREQ(Signal* signal) // get new operation BuildIndexPtr buildPtr; if (! 
c_buildIndexList.seize(buildPtr)) { - ljam(); + jam(); BuildIndexRec buildRec; memcpy(buildRec.m_request, signal->theData, sizeof(buildRec.m_request)); buildRec.m_errorCode= BuildIndxRef::Busy; @@ -402,7 +400,7 @@ Dbtup::execBUILDINDXREQ(Signal* signal) do { const BuildIndxReq* buildReq= (const BuildIndxReq*)buildPtr.p->m_request; if (buildReq->getTableId() >= cnoOfTablerec) { - ljam(); + jam(); buildPtr.p->m_errorCode= BuildIndxRef::InvalidPrimaryTable; break; } @@ -410,7 +408,7 @@ Dbtup::execBUILDINDXREQ(Signal* signal) tablePtr.i= buildReq->getTableId(); ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec); if (tablePtr.p->tableStatus != DEFINED) { - ljam(); + jam(); buildPtr.p->m_errorCode= BuildIndxRef::InvalidPrimaryTable; break; } @@ -418,7 +416,7 @@ Dbtup::execBUILDINDXREQ(Signal* signal) buildPtr.p->m_build_vs = tablePtr.p->m_attributes[MM].m_no_of_varsize > 0; if (DictTabInfo::isOrderedIndex(buildReq->getIndexType())) { - ljam(); + jam(); const DLList<TupTriggerData>& triggerList = tablePtr.p->tuxCustomTriggers; @@ -426,13 +424,13 @@ Dbtup::execBUILDINDXREQ(Signal* signal) triggerList.first(triggerPtr); while (triggerPtr.i != RNIL) { if (triggerPtr.p->indexId == buildReq->getIndexId()) { - ljam(); + jam(); break; } triggerList.next(triggerPtr); } if (triggerPtr.i == RNIL) { - ljam(); + jam(); // trigger was not created buildPtr.p->m_errorCode = BuildIndxRef::InternalError; break; @@ -440,12 +438,12 @@ Dbtup::execBUILDINDXREQ(Signal* signal) buildPtr.p->m_indexId = buildReq->getIndexId(); buildPtr.p->m_buildRef = DBTUX; } else if(buildReq->getIndexId() == RNIL) { - ljam(); + jam(); // REBUILD of acc buildPtr.p->m_indexId = RNIL; buildPtr.p->m_buildRef = DBACC; } else { - ljam(); + jam(); buildPtr.p->m_errorCode = BuildIndxRef::InvalidIndexType; break; } @@ -489,7 +487,7 @@ Dbtup::buildIndex(Signal* signal, Uint32 buildPtrI) // get fragment FragrecordPtr fragPtr; if (buildPtr.p->m_fragNo == MAX_FRAG_PER_NODE) { - ljam(); + jam(); // build ready buildIndexReply(signal, buildPtr.p); c_buildIndexList.release(buildPtr); @@ -498,7 +496,7 @@ Dbtup::buildIndex(Signal* signal, Uint32 buildPtrI) ndbrequire(buildPtr.p->m_fragNo < MAX_FRAG_PER_NODE); fragPtr.i= tablePtr.p->fragrec[buildPtr.p->m_fragNo]; if (fragPtr.i == RNIL) { - ljam(); + jam(); buildPtr.p->m_fragNo++; buildPtr.p->m_pageId= 0; buildPtr.p->m_tupleNo= firstTupleNo; @@ -508,7 +506,7 @@ Dbtup::buildIndex(Signal* signal, Uint32 buildPtrI) // get page PagePtr pagePtr; if (buildPtr.p->m_pageId >= fragPtr.p->noOfPages) { - ljam(); + jam(); buildPtr.p->m_fragNo++; buildPtr.p->m_pageId= 0; buildPtr.p->m_tupleNo= firstTupleNo; @@ -519,7 +517,7 @@ Dbtup::buildIndex(Signal* signal, Uint32 buildPtrI) Uint32 pageState= pagePtr.p->page_state; // skip empty page if (pageState == ZEMPTY_MM) { - ljam(); + jam(); buildPtr.p->m_pageId++; buildPtr.p->m_tupleNo= firstTupleNo; break; @@ -529,7 +527,7 @@ Dbtup::buildIndex(Signal* signal, Uint32 buildPtrI) const Tuple_header* tuple_ptr = 0; pageIndex = buildPtr.p->m_tupleNo * tupheadsize; if (pageIndex + tupheadsize > Fix_page::DATA_WORDS) { - ljam(); + jam(); buildPtr.p->m_pageId++; buildPtr.p->m_tupleNo= firstTupleNo; break; @@ -537,7 +535,7 @@ Dbtup::buildIndex(Signal* signal, Uint32 buildPtrI) tuple_ptr = (Tuple_header*)&pagePtr.p->m_data[pageIndex]; // skip over free tuple if (tuple_ptr->m_header_bits & Tuple_header::FREE) { - ljam(); + jam(); buildPtr.p->m_tupleNo++; break; } @@ -580,7 +578,7 @@ Dbtup::buildIndex(Signal* signal, Uint32 buildPtrI) tuple as a copy tuple. 
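[Editor's note] The request validation in execBUILDINDXREQ above uses the single-pass "break out of a do { } while(0)" idiom: each check sets an error code and breaks, and the code after the block sends either CONF or REF exactly once. A minimal sketch with invented error names:

enum BuildErr { NoError = 0, InvalidTable, InvalidIndexType };

BuildErr validate(unsigned tableId, unsigned tableCount, bool ordered)
{
  BuildErr err = NoError;
  do {
    if (tableId >= tableCount) { err = InvalidTable;     break; }
    if (!ordered)              { err = InvalidIndexType; break; }
    // ... further checks, each with its own break ...
  } while (0);
  return err;   // single exit point for the CONF/REF decision
}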
The original tuple is stable and is thus preferrable to store in TUX. */ - ljam(); + jam(); /** * Since copy tuples now can't be found on real pages. @@ -609,11 +607,11 @@ Dbtup::buildIndex(Signal* signal, Uint32 buildPtrI) } while(req->errorCode == 0 && pageOperPtr.i != RNIL); } - ljamEntry(); + jamEntry(); if (req->errorCode != 0) { switch (req->errorCode) { case TuxMaintReq::NoMemError: - ljam(); + jam(); buildPtr.p->m_errorCode= BuildIndxRef::AllocationFailure; break; default: @@ -665,7 +663,7 @@ Dbtup::buildIndexReply(Signal* signal, const BuildIndexRec* buildPtrP) rep->setIndexId(buildReq->getIndexId()); // conf if (buildPtrP->m_errorCode == BuildIndxRef::NoError) { - ljam(); + jam(); sendSignal(rep->getUserRef(), GSN_BUILDINDXCONF, signal, BuildIndxConf::SignalLength, JBB); return; diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp index 4df3f91068c..c8df5f5154e 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp @@ -15,6 +15,7 @@ #define DBTUP_C +#define DBTUP_META_CPP #include "Dbtup.hpp" #include <RefConvert.hpp> #include <ndb_limits.h> @@ -30,16 +31,13 @@ #include "AttributeOffset.hpp" #include <my_sys.h> -#define ljam() { jamLine(20000 + __LINE__); } -#define ljamEntry() { jamEntryLine(20000 + __LINE__); } - void Dbtup::execTUPFRAGREQ(Signal* signal) { - ljamEntry(); + jamEntry(); TupFragReq* tupFragReq = (TupFragReq*)signal->getDataPtr(); if (tupFragReq->userPtr == (Uint32)-1) { - ljam(); + jam(); abortAddFragOp(signal); return; } @@ -54,7 +52,6 @@ void Dbtup::execTUPFRAGREQ(Signal* signal) regTabPtr.i = tupFragReq->tableId; Uint32 noOfAttributes = tupFragReq->noOfAttr; Uint32 fragId = tupFragReq->fragId; - Uint32 noOfNullAttr = tupFragReq->noOfNullAttr; /* Uint32 schemaVersion = tupFragReq->schemaVersion;*/ Uint32 noOfKeyAttr = tupFragReq->noOfKeyAttr; Uint32 noOfCharsets = tupFragReq->noOfCharsets; @@ -72,7 +69,7 @@ void Dbtup::execTUPFRAGREQ(Signal* signal) #ifndef VM_TRACE // config mismatch - do not crash if release compiled if (regTabPtr.i >= cnoOfTablerec) { - ljam(); + jam(); tupFragReq->userPtr = userptr; tupFragReq->userRef = 800; sendSignal(userblockref, GSN_TUPFRAGREF, signal, 2, JBB); @@ -82,7 +79,7 @@ void Dbtup::execTUPFRAGREQ(Signal* signal) ptrCheckGuard(regTabPtr, cnoOfTablerec, tablerec); if (cfirstfreeFragopr == RNIL) { - ljam(); + jam(); tupFragReq->userPtr = userptr; tupFragReq->userRef = ZNOFREE_FRAGOP_ERROR; sendSignal(userblockref, GSN_TUPFRAGREF, signal, 2, JBB); @@ -111,29 +108,29 @@ void Dbtup::execTUPFRAGREQ(Signal* signal) getFragmentrec(regFragPtr, fragId, regTabPtr.p); if (regFragPtr.i != RNIL) { - ljam(); + jam(); terrorCode= ZEXIST_FRAG_ERROR; fragrefuse1Lab(signal, fragOperPtr); return; } if (cfirstfreefrag != RNIL) { - ljam(); + jam(); seizeFragrecord(regFragPtr); } else { - ljam(); + jam(); terrorCode= ZFULL_FRAGRECORD_ERROR; fragrefuse1Lab(signal, fragOperPtr); return; } initFragRange(regFragPtr.p); if (!addfragtotab(regTabPtr.p, fragId, regFragPtr.i)) { - ljam(); + jam(); terrorCode= ZNO_FREE_TAB_ENTRY_ERROR; fragrefuse2Lab(signal, fragOperPtr, regFragPtr); return; } if (cfirstfreerange == RNIL) { - ljam(); + jam(); terrorCode= ZNO_FREE_PAGE_RANGE_ERROR; fragrefuse3Lab(signal, fragOperPtr, regFragPtr, regTabPtr.p, fragId); return; @@ -150,7 +147,7 @@ void Dbtup::execTUPFRAGREQ(Signal* signal) if (ERROR_INSERTED(4007) && regTabPtr.p->fragid[0] == fragId || ERROR_INSERTED(4008) && regTabPtr.p->fragid[1] == 
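[Editor's note] buildIndex above is a resumable scan: the build record stores a cursor (fragment, page, tuple) and each invocation advances it a bounded amount, so the block never monopolizes the thread between signals. A skeleton of that shape with hypothetical limits; in the real block the "re-run" is a follow-up signal, not a return value:

struct BuildCursor { unsigned frag, page, tuple; };

// Returns true when the whole scan is finished.
bool buildStep(BuildCursor& c, unsigned nFrags,
               unsigned pagesPerFrag, unsigned tuplesPerPage)
{
  if (c.frag == nFrags)
    return true;                       // all fragments done: send the reply
  if (c.page >= pagesPerFrag) {        // fragment exhausted
    c.frag++; c.page = 0; c.tuple = 0;
    return false;                      // resume via a continuation signal
  }
  if (c.tuple >= tuplesPerPage) {      // page exhausted
    c.page++; c.tuple = 0;
    return false;
  }
  // ... index one tuple here (skip FREE tuples, prefer stable originals) ...
  c.tuple++;
  return false;
}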
fragId) { - ljam(); + jam(); terrorCode = 1; fragrefuse4Lab(signal, fragOperPtr, regFragPtr, regTabPtr.p, fragId); CLEAR_ERROR_INSERT_VALUE; @@ -158,7 +155,7 @@ void Dbtup::execTUPFRAGREQ(Signal* signal) } if (regTabPtr.p->tableStatus == NOT_DEFINED) { - ljam(); + jam(); //----------------------------------------------------------------------------- // We are setting up references to the header of the tuple. // Active operation This word contains a reference to the operation active @@ -205,13 +202,13 @@ void Dbtup::execTUPFRAGREQ(Signal* signal) Uint32 offset[10]; Uint32 tableDescriptorRef= allocTabDescr(regTabPtr.p, offset); if (tableDescriptorRef == RNIL) { - ljam(); + jam(); fragrefuse4Lab(signal, fragOperPtr, regFragPtr, regTabPtr.p, fragId); return; } setUpDescriptorReferences(tableDescriptorRef, regTabPtr.p, offset); } else { - ljam(); + jam(); fragOperPtr.p->definingFragment= false; } signal->theData[0]= fragOperPtr.p->lqhPtrFrag; @@ -227,9 +224,9 @@ bool Dbtup::addfragtotab(Tablerec* const regTabPtr, Uint32 fragIndex) { for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) { - ljam(); + jam(); if (regTabPtr->fragid[i] == RNIL) { - ljam(); + jam(); regTabPtr->fragid[i]= fragId; regTabPtr->fragrec[i]= fragIndex; return true; @@ -243,9 +240,9 @@ void Dbtup::getFragmentrec(FragrecordPtr& regFragPtr, Tablerec* const regTabPtr) { for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) { - ljam(); + jam(); if (regTabPtr->fragid[i] == fragId) { - ljam(); + jam(); regFragPtr.i= regTabPtr->fragrec[i]; ptrCheckGuard(regFragPtr, cnoOfFragrec, fragrecord); return; @@ -281,7 +278,7 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal) FragoperrecPtr fragOperPtr; TablerecPtr regTabPtr; - ljamEntry(); + jamEntry(); fragOperPtr.i= signal->theData[0]; ptrCheckGuard(fragOperPtr, cnoOfFragoprec, fragoperrec); Uint32 attrId = signal->theData[2]; @@ -342,7 +339,7 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal) Uint32 attrDes2= 0; if (!AttributeDescriptor::getDynamic(attrDescriptor)) { - ljam(); + jam(); Uint32 pos= 0, null_pos; Uint32 bytes= AttributeDescriptor::getSizeInBytes(attrDescriptor); Uint32 words= (bytes + 3) / 4; @@ -352,7 +349,7 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal) if (AttributeDescriptor::getNullable(attrDescriptor)) { - ljam(); + jam(); fragOperPtr.p->m_null_bits[ind]++; } else @@ -367,17 +364,17 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal) switch (AttributeDescriptor::getArrayType(attrDescriptor)) { case NDB_ARRAYTYPE_FIXED: { - ljam(); + jam(); regTabPtr.p->m_attributes[ind].m_no_of_fixsize++; if(attrLen != 0) { - ljam(); + jam(); pos= fragOperPtr.p->m_fix_attributes_size[ind]; fragOperPtr.p->m_fix_attributes_size[ind] += words; } else { - ljam(); + jam(); Uint32 bitCount = AttributeDescriptor::getArraySize(attrDescriptor); fragOperPtr.p->m_null_bits[ind] += bitCount; } @@ -385,7 +382,7 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal) } default: { - ljam(); + jam(); fragOperPtr.p->m_var_attributes_size[ind] += bytes; pos= regTabPtr.p->m_attributes[ind].m_no_of_varsize++; break; @@ -402,13 +399,13 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal) ndbrequire(cs != NULL); Uint32 i = 0; while (i < fragOperPtr.p->charsetIndex) { - ljam(); + jam(); if (regTabPtr.p->charsetArray[i] == cs) break; i++; } if (i == fragOperPtr.p->charsetIndex) { - ljam(); + jam(); fragOperPtr.p->charsetIndex++; } ndbrequire(i < regTabPtr.p->noOfCharsets); @@ -421,7 +418,7 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal) ERROR_INSERTED(4010) && regTabPtr.p->fragid[0] == fragId && lastAttr || 
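[Editor's note] The ERROR_INSERTED(4007)/(4008) checks above are fault-injection hooks: a test tool arms one error number per node, the code fakes exactly one failure at the marked spot, then clears the trigger so subsequent calls succeed. A stripped-down version of the mechanism, with the arming side assumed:

static unsigned g_errorInsertValue = 0;   // armed by a test command (assumed)

#define ERROR_INSERTED(n)        (g_errorInsertValue == (n))
#define CLEAR_ERROR_INSERT_VALUE (g_errorInsertValue = 0)

bool addFragment(unsigned fragId)
{
  if (ERROR_INSERTED(4007) && fragId == 0) {
    CLEAR_ERROR_INSERT_VALUE;   // one-shot: the next attempt succeeds
    return false;               // exercise the refuse/rollback path
  }
  // ... normal fragment setup ...
  return true;
}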
ERROR_INSERTED(4011) && regTabPtr.p->fragid[1] == fragId && attrId == 0|| ERROR_INSERTED(4012) && regTabPtr.p->fragid[1] == fragId && lastAttr) { - ljam(); + jam(); terrorCode = 1; addattrrefuseLab(signal, regFragPtr, fragOperPtr, regTabPtr.p, fragId); CLEAR_ERROR_INSERT_VALUE; @@ -432,7 +429,7 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal) /* ************** TUP_ADD_ATTCONF ****************** */ /* **************************************************************** */ if (! lastAttr) { - ljam(); + jam(); signal->theData[0] = fragOperPtr.p->lqhPtrFrag; signal->theData[1] = lastAttr; sendSignal(fragOperPtr.p->lqhBlockrefFrag, GSN_TUP_ADD_ATTCONF, @@ -571,7 +568,7 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal) noAllocatedPages = allocFragPages(regFragPtr.p, noAllocatedPages); if (noAllocatedPages == 0) { - ljam(); + jam(); terrorCode = ZNO_PAGES_ALLOCATED_ERROR; addattrrefuseLab(signal, regFragPtr, fragOperPtr, regTabPtr.p, fragId); return; @@ -581,7 +578,7 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal) CreateFilegroupImplReq rep; if(regTabPtr.p->m_no_of_disk_attributes) { - ljam(); + jam(); Tablespace_client tsman(0, c_tsman, 0, 0, regFragPtr.p->m_tablespace_id); ndbrequire(tsman.get_tablespace_info(&rep) == 0); @@ -598,12 +595,12 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal) if (regTabPtr.p->m_no_of_disk_attributes) { - ljam(); + jam(); if(!(getNodeState().startLevel == NodeState::SL_STARTING && getNodeState().starting.startPhase <= 4)) { Callback cb; - ljam(); + jam(); cb.m_callbackData= fragOperPtr.i; cb.m_callbackFunction = @@ -611,13 +608,17 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal) Uint32 sz= sizeof(Disk_undo::Create) >> 2; Logfile_client lgman(this, c_lgman, regFragPtr.p->m_logfile_group_id); - int r0 = c_lgman->alloc_log_space(regFragPtr.p->m_logfile_group_id, - sz); + if((terrorCode = + c_lgman->alloc_log_space(regFragPtr.p->m_logfile_group_id, sz))) + { + addattrrefuseLab(signal, regFragPtr, fragOperPtr, regTabPtr.p, fragId); + return; + } int res= lgman.get_log_buffer(signal, sz, &cb); switch(res){ case 0: - ljam(); + jam(); signal->theData[0] = 1; return; case -1: @@ -736,11 +737,11 @@ void Dbtup::setUpKeyArray(Tablerec* const regTabPtr) Uint32* keyArray= &tableDescriptor[regTabPtr->readKeyArray].tabDescr; Uint32 countKeyAttr= 0; for (Uint32 i= 0; i < regTabPtr->m_no_of_attributes; i++) { - ljam(); + jam(); Uint32 refAttr= regTabPtr->tabDescriptor + (i * ZAD_SIZE); Uint32 attrDescriptor= getTabDescrWord(refAttr); if (AttributeDescriptor::getPrimaryKey(attrDescriptor)) { - ljam(); + jam(); AttributeHeader::init(&keyArray[countKeyAttr], i, 0); countKeyAttr++; } @@ -760,7 +761,7 @@ void Dbtup::setUpKeyArray(Tablerec* const regTabPtr) { for (Uint32 i= 0; i < regTabPtr->m_no_of_attributes; i++) { - ljam(); + jam(); Uint32 refAttr= regTabPtr->tabDescriptor + (i * ZAD_SIZE); Uint32 desc = getTabDescrWord(refAttr); Uint32 t = 0; @@ -855,9 +856,9 @@ void Dbtup::releaseFragoperrec(FragoperrecPtr fragOperPtr) void Dbtup::deleteFragTab(Tablerec* const regTabPtr, Uint32 fragId) { for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) { - ljam(); + jam(); if (regTabPtr->fragid[i] == fragId) { - ljam(); + jam(); regTabPtr->fragid[i]= RNIL; regTabPtr->fragrec[i]= RNIL; return; @@ -883,7 +884,7 @@ void Dbtup::abortAddFragOp(Signal* signal) void Dbtup::execDROP_TAB_REQ(Signal* signal) { - ljamEntry(); + jamEntry(); if (ERROR_INSERTED(4013)) { #ifdef VM_TRACE verifytabdes(); @@ -909,7 +910,7 @@ void Dbtup::releaseTabDescr(Tablerec* const regTabPtr) { Uint32 descriptor= 
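[Editor's note] The substantive fix in the hunk above: the undo-log space reservation is no longer discarded into an unused "int r0"; a non-zero return from alloc_log_space now becomes terrorCode and the add-attribute request is refused. A sketch of the checked shape, with an assumed signature:

// Assumed contract: returns 0 on success, a non-zero error code otherwise.
int alloc_log_space(unsigned logfileGroupId, unsigned words);

int reserveUndoSpace(unsigned lg, unsigned words, int& terrorCode)
{
  if ((terrorCode = alloc_log_space(lg, words)) != 0)
    return -1;   // caller takes the refuse path and stops here
  return 0;      // reservation held; proceed to get_log_buffer()
}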
regTabPtr->readKeyArray; if (descriptor != RNIL) { - ljam(); + jam(); Uint32 offset[10]; getTabDescrOffsets(regTabPtr, offset); @@ -940,16 +941,16 @@ void Dbtup::releaseFragment(Signal* signal, Uint32 tableId, Uint32 fragId = RNIL; Uint32 i = 0; for (i = 0; i < MAX_FRAG_PER_NODE; i++) { - ljam(); + jam(); if (tabPtr.p->fragid[i] != RNIL) { - ljam(); + jam(); fragIndex= tabPtr.p->fragrec[i]; fragId= tabPtr.p->fragid[i]; break; } } if (fragIndex != RNIL) { - ljam(); + jam(); signal->theData[0] = ZUNMAP_PAGES; signal->theData[1] = tabPtr.i; @@ -979,7 +980,7 @@ void Dbtup::releaseFragment(Signal* signal, Uint32 tableId, int res= lgman.get_log_buffer(signal, sz, &cb); switch(res){ case 0: - ljam(); + jam(); return; case -1: warningEvent("Failed to get log buffer for drop table: %u", @@ -1119,14 +1120,14 @@ Dbtup::drop_fragment_free_extent(Signal *signal, safe_cast(&Dbtup::drop_fragment_free_extent_log_buffer_callback); #if NOT_YET_UNDO_FREE_EXTENT Uint32 sz= sizeof(Disk_undo::FreeExtent) >> 2; - int r0 = c_lgman->alloc_log_space(fragPtr.p->m_logfile_group_id, sz); + (void) c_lgman->alloc_log_space(fragPtr.p->m_logfile_group_id, sz); Logfile_client lgman(this, c_lgman, fragPtr.p->m_logfile_group_id); int res= lgman.get_log_buffer(signal, sz, &cb); switch(res){ case 0: - ljam(); + jam(); return; case -1: ndbrequire("NOT YET IMPLEMENTED" == 0); @@ -1278,7 +1279,7 @@ Dbtup::drop_fragment_free_extent_log_buffer_callback(Signal* signal, void Dbtup::drop_fragment_free_var_pages(Signal* signal) { - ljam(); + jam(); Uint32 tableId = signal->theData[1]; Uint32 fragPtrI = signal->theData[2]; diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupPagMan.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupPagMan.cpp index cad9850a658..24806062fcf 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupPagMan.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupPagMan.cpp @@ -14,14 +14,12 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #define DBTUP_C +#define DBTUP_PAG_MAN_CPP #include "Dbtup.hpp" #include <RefConvert.hpp> #include <ndb_limits.h> #include <pc.hpp> -#define ljam() { jamLine(16000 + __LINE__); } -#define ljamEntry() { jamEntryLine(16000 + __LINE__); } - /* ---------------------------------------------------------------- */ // 4) Page Memory Manager (buddy algorithm) // @@ -121,7 +119,7 @@ void Dbtup::initializePage() }//for PagePtr pagePtr; for (pagePtr.i = 0; pagePtr.i < c_page_pool.getSize(); pagePtr.i++) { - ljam(); + jam(); refresh_watch_dog(); c_page_pool.getPtr(pagePtr); pagePtr.p->physical_page_id= RNIL; @@ -160,16 +158,16 @@ void Dbtup::allocConsPages(Uint32 noOfPagesToAllocate, fc_left = fc_right = fc_remove = 0; #endif if (noOfPagesToAllocate == 0){ - ljam(); + jam(); noOfPagesAllocated = 0; return; }//if Uint32 firstListToCheck = nextHigherTwoLog(noOfPagesToAllocate - 1); for (Uint32 i = firstListToCheck; i < 16; i++) { - ljam(); + jam(); if (cfreepageList[i] != RNIL) { - ljam(); + jam(); /* ---------------------------------------------------------------- */ /* PROPER AMOUNT OF PAGES WERE FOUND. NOW SPLIT THE FOUND */ /* AREA AND RETURN THE PART NOT NEEDED. 
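[Editor's note] Both get_log_buffer call sites above follow the same asynchronous contract: judging from the switch statements, a return of 0 parks the request until the registered callback fires, -1 is a hard failure, and any other value appears to mean the buffer is usable immediately. A shape sketch under those assumptions; the callback signature is invented:

typedef void (*Callback)(unsigned userData, int ret);

int get_log_buffer(unsigned words, Callback cb);   // assumed semantics

void requestBuffer(unsigned words, Callback cb)
{
  switch (get_log_buffer(words, cb)) {
  case 0:               // suspended: cb continues the work later
    return;
  case -1:              // hard failure: report / require
    return;
  default:              // space available now: continue inline
    cb(0, 0);
    return;
  }
}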
*/ @@ -189,11 +187,11 @@ void Dbtup::allocConsPages(Uint32 noOfPagesToAllocate, /* ---------------------------------------------------------------- */ if (firstListToCheck) { - ljam(); + jam(); for (Uint32 j = firstListToCheck - 1; (Uint32)~j; j--) { - ljam(); + jam(); if (cfreepageList[j] != RNIL) { - ljam(); + jam(); /* ---------------------------------------------------------------- */ /* SOME AREA WAS FOUND, ALLOCATE ALL OF IT. */ /* ---------------------------------------------------------------- */ @@ -219,9 +217,9 @@ void Dbtup::allocConsPages(Uint32 noOfPagesToAllocate, void Dbtup::returnCommonArea(Uint32 retPageRef, Uint32 retNo) { do { - ljam(); + jam(); if (retNo == 0) { - ljam(); + jam(); return; }//if Uint32 list = nextHigherTwoLog(retNo) - 1; @@ -241,28 +239,28 @@ void Dbtup::findFreeLeftNeighbours(Uint32& allocPageRef, while (allocPageRef > 0 && ++loop < 16) { - ljam(); + jam(); pageLastPtr.i = allocPageRef - 1; c_page_pool.getPtr(pageLastPtr); if (pageLastPtr.p->page_state != ZFREE_COMMON) { - ljam(); + jam(); return; } else { - ljam(); + jam(); pageFirstPtr.i = pageLastPtr.p->first_cluster_page; ndbrequire(pageFirstPtr.i != RNIL); Uint32 list = nextHigherTwoLog(pageLastPtr.i - pageFirstPtr.i); removeCommonArea(pageFirstPtr.i, list); Uint32 listSize = 1 << list; if (listSize > remainAllocate) { - ljam(); + jam(); Uint32 retNo = listSize - remainAllocate; returnCommonArea(pageFirstPtr.i, retNo); allocPageRef = pageFirstPtr.i + retNo; noPagesAllocated = noOfPagesToAllocate; return; } else { - ljam(); + jam(); allocPageRef = pageFirstPtr.i; noPagesAllocated += listSize; remainAllocate -= listSize; @@ -281,35 +279,35 @@ void Dbtup::findFreeRightNeighbours(Uint32& allocPageRef, PagePtr pageFirstPtr, pageLastPtr; Uint32 remainAllocate = noOfPagesToAllocate - noPagesAllocated; if (remainAllocate == 0) { - ljam(); + jam(); return; }//if Uint32 loop = 0; while ((allocPageRef + noPagesAllocated) < c_page_pool.getSize() && ++loop < 16) { - ljam(); + jam(); pageFirstPtr.i = allocPageRef + noPagesAllocated; c_page_pool.getPtr(pageFirstPtr); if (pageFirstPtr.p->page_state != ZFREE_COMMON) { - ljam(); + jam(); return; } else { - ljam(); + jam(); pageLastPtr.i = pageFirstPtr.p->last_cluster_page; ndbrequire(pageLastPtr.i != RNIL); Uint32 list = nextHigherTwoLog(pageLastPtr.i - pageFirstPtr.i); removeCommonArea(pageFirstPtr.i, list); Uint32 listSize = 1 << list; if (listSize > remainAllocate) { - ljam(); + jam(); Uint32 retPageRef = pageFirstPtr.i + remainAllocate; Uint32 retNo = listSize - remainAllocate; returnCommonArea(retPageRef, retNo); noPagesAllocated += remainAllocate; return; } else { - ljam(); + jam(); noPagesAllocated += listSize; remainAllocate -= listSize; }//if @@ -357,12 +355,12 @@ void Dbtup::removeCommonArea(Uint32 remPageRef, Uint32 list) c_page_pool.getPtr(remPagePtr, remPageRef); ndbrequire(list < 16); if (cfreepageList[list] == remPagePtr.i) { - ljam(); + jam(); ndbassert(remPagePtr.p->prev_cluster_page == RNIL); cfreepageList[list] = remPagePtr.p->next_cluster_page; pageNextPtr.i = cfreepageList[list]; if (pageNextPtr.i != RNIL) { - ljam(); + jam(); c_page_pool.getPtr(pageNextPtr); pageNextPtr.p->prev_cluster_page = RNIL; }//if @@ -373,7 +371,7 @@ void Dbtup::removeCommonArea(Uint32 remPageRef, Uint32 list) pagePrevPtr.p->next_cluster_page = pageNextPtr.i; if (pageNextPtr.i != RNIL) { - ljam(); + jam(); c_page_pool.getPtr(pageNextPtr); pageNextPtr.p->prev_cluster_page = pagePrevPtr.i; } diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp 
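[Editor's note] The page manager above is a buddy-style allocator: free runs of pages live on 16 lists indexed by log2 of their length, found via nextHigherTwoLog, with oversized runs split and remainders returned. A sketch of the two index computations the code leans on; the list insertion itself is elided:

#include <cstdint>

// Smallest k with 2^k > n, i.e. the list guaranteed to hold a run of at
// least n+1 pages (mirrors how the code calls nextHigherTwoLog(n - 1)).
static unsigned nextHigherTwoLog(uint32_t n)
{
  unsigned k = 0;
  while ((1u << k) <= n) k++;
  return k;
}

// Returning a run of retNo pages: carve off the largest power-of-two chunk
// that fits, push it on its list, repeat (mirrors returnCommonArea()).
void returnRun(uint32_t pageRef, uint32_t retNo)
{
  while (retNo != 0) {
    unsigned list  = nextHigherTwoLog(retNo) - 1;  // largest 2^list <= retNo
    uint32_t chunk = 1u << list;
    // insertCommonArea(pageRef, list);            // list insert elided
    pageRef += chunk;
    retNo   -= chunk;
  }
}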
b/storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp index ed8f63ce3ad..cde63091cfb 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp @@ -15,14 +15,12 @@ #define DBTUP_C +#define DBTUP_PAGE_MAP_CPP #include "Dbtup.hpp" #include <RefConvert.hpp> #include <ndb_limits.h> #include <pc.hpp> -#define ljam() { jamLine(14000 + __LINE__); } -#define ljamEntry() { jamEntryLine(14000 + __LINE__); } - // // PageMap is a service used by Dbtup to map logical page id's to physical // page id's. The mapping is needs the fragment and the logical page id to @@ -92,11 +90,11 @@ Uint32 Dbtup::getEmptyPage(Fragrecord* regFragPtr) { Uint32 pageId = regFragPtr->emptyPrimPage.firstItem; if (pageId == RNIL) { - ljam(); + jam(); allocMoreFragPages(regFragPtr); pageId = regFragPtr->emptyPrimPage.firstItem; if (pageId == RNIL) { - ljam(); + jam(); return RNIL; }//if }//if @@ -122,11 +120,11 @@ Uint32 Dbtup::getRealpid(Fragrecord* regFragPtr, Uint32 logicalPageId) loopLimit = grpPageRangePtr.p->currentIndexPos; ndbrequire(loopLimit <= 3); for (Uint32 i = 0; i <= loopLimit; i++) { - ljam(); + jam(); if (grpPageRangePtr.p->startRange[i] <= logicalPageId) { if (grpPageRangePtr.p->endRange[i] >= logicalPageId) { if (grpPageRangePtr.p->type[i] == ZLEAF) { - ljam(); + jam(); Uint32 realPageId = (logicalPageId - grpPageRangePtr.p->startRange[i]) + grpPageRangePtr.p->basePageId[i]; return realPageId; @@ -167,12 +165,12 @@ bool Dbtup::insertPageRangeTab(Fragrecord* const regFragPtr, { PageRangePtr currPageRangePtr; if (cfirstfreerange == RNIL) { - ljam(); + jam(); return false; }//if currPageRangePtr.i = regFragPtr->currentPageRange; if (currPageRangePtr.i == RNIL) { - ljam(); + jam(); /* ---------------------------------------------------------------- */ /* THE FIRST PAGE RANGE IS HANDLED WITH SPECIAL CODE */ /* ---------------------------------------------------------------- */ @@ -181,10 +179,10 @@ bool Dbtup::insertPageRangeTab(Fragrecord* const regFragPtr, currPageRangePtr.p->currentIndexPos = 0; currPageRangePtr.p->parentPtr = RNIL; } else { - ljam(); + jam(); ptrCheckGuard(currPageRangePtr, cnoOfPageRangeRec, pageRange); if (currPageRangePtr.p->currentIndexPos < 3) { - ljam(); + jam(); /* ---------------------------------------------------------------- */ /* THE SIMPLE CASE WHEN IT IS ONLY NECESSARY TO FILL IN THE */ /* NEXT EMPTY POSITION IN THE PAGE RANGE RECORD IS TREATED */ @@ -192,7 +190,7 @@ bool Dbtup::insertPageRangeTab(Fragrecord* const regFragPtr, /* ---------------------------------------------------------------- */ currPageRangePtr.p->currentIndexPos++; } else { - ljam(); + jam(); ndbrequire(currPageRangePtr.p->currentIndexPos == 3); currPageRangePtr.i = leafPageRangeFull(regFragPtr, currPageRangePtr); if (currPageRangePtr.i == RNIL) { @@ -223,15 +221,15 @@ bool Dbtup::insertPageRangeTab(Fragrecord* const regFragPtr, PageRangePtr loopPageRangePtr; loopPageRangePtr = currPageRangePtr; while (true) { - ljam(); + jam(); loopPageRangePtr.i = loopPageRangePtr.p->parentPtr; if (loopPageRangePtr.i != RNIL) { - ljam(); + jam(); ptrCheckGuard(loopPageRangePtr, cnoOfPageRangeRec, pageRange); ndbrequire(loopPageRangePtr.p->currentIndexPos < 4); loopPageRangePtr.p->endRange[loopPageRangePtr.p->currentIndexPos] += noPages; } else { - ljam(); + jam(); break; }//if }//while @@ -243,26 +241,26 @@ bool Dbtup::insertPageRangeTab(Fragrecord* const regFragPtr, void Dbtup::releaseFragPages(Fragrecord* regFragPtr) { if (regFragPtr->rootPageRange == RNIL) 
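[Editor's note] getRealpid above resolves a logical page id through a tree whose nodes hold up to four [start, end] -> base mappings; a leaf entry translates by simple offset arithmetic, a non-leaf entry points at a child node. A one-node lookup sketch with the tree descent elided; field names are stand-ins:

#include <cstdint>

struct RangeNode {
  uint32_t start[4], end[4], base[4];
  bool     leaf[4];
  uint8_t  used;                 // entries 0..used are valid
};

// Returns the physical page id, or ~0u if the id lands on a non-leaf entry
// (the real code then descends into the child node at base[i]).
uint32_t lookup(const RangeNode& n, uint32_t logicalId)
{
  for (unsigned i = 0; i <= n.used; i++)
    if (n.start[i] <= logicalId && logicalId <= n.end[i])
      return n.leaf[i] ? (logicalId - n.start[i]) + n.base[i] : ~0u;
  return ~0u;                    // not mapped in this node
}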
{ - ljam(); + jam(); return; }//if PageRangePtr regPRPtr; regPRPtr.i = regFragPtr->rootPageRange; ptrCheckGuard(regPRPtr, cnoOfPageRangeRec, pageRange); while (true) { - ljam(); + jam(); const Uint32 indexPos = regPRPtr.p->currentIndexPos; ndbrequire(indexPos < 4); const Uint32 basePageId = regPRPtr.p->basePageId[indexPos]; regPRPtr.p->basePageId[indexPos] = RNIL; if (basePageId == RNIL) { - ljam(); + jam(); /** * Finished with indexPos continue with next */ if (indexPos > 0) { - ljam(); + jam(); regPRPtr.p->currentIndexPos--; continue; }//if @@ -274,13 +272,13 @@ void Dbtup::releaseFragPages(Fragrecord* regFragPtr) releasePagerange(regPRPtr); if (parentPtr != RNIL) { - ljam(); + jam(); regPRPtr.i = parentPtr; ptrCheckGuard(regPRPtr, cnoOfPageRangeRec, pageRange); continue; }//if - ljam(); + jam(); ndbrequire(regPRPtr.i == regFragPtr->rootPageRange); initFragRange(regFragPtr); for (Uint32 i = 0; i<MAX_FREE_LIST; i++) @@ -364,7 +362,7 @@ Uint32 Dbtup::allocFragPages(Fragrecord* regFragPtr, Uint32 tafpNoAllocRequested Uint32 retPageRef = RNIL; allocConsPages(noPagesToAllocate, noOfPagesAllocated, retPageRef); if (noOfPagesAllocated == 0) { - ljam(); + jam(); return tafpPagesAllocated; }//if /* ---------------------------------------------------------------- */ @@ -373,7 +371,7 @@ Uint32 Dbtup::allocFragPages(Fragrecord* regFragPtr, Uint32 tafpNoAllocRequested /* ---------------------------------------------------------------- */ Uint32 startRange = regFragPtr->nextStartRange; if (!insertPageRangeTab(regFragPtr, retPageRef, noOfPagesAllocated)) { - ljam(); + jam(); returnCommonArea(retPageRef, noOfPagesAllocated); return tafpPagesAllocated; }//if @@ -388,7 +386,7 @@ Uint32 Dbtup::allocFragPages(Fragrecord* regFragPtr, Uint32 tafpNoAllocRequested /* ---------------------------------------------------------------- */ Uint32 prev = RNIL; for (loopPagePtr.i = retPageRef; loopPagePtr.i < loopLimit; loopPagePtr.i++) { - ljam(); + jam(); c_page_pool.getPtr(loopPagePtr); loopPagePtr.p->page_state = ZEMPTY_MM; loopPagePtr.p->frag_page_id = startRange + @@ -416,10 +414,10 @@ Uint32 Dbtup::allocFragPages(Fragrecord* regFragPtr, Uint32 tafpNoAllocRequested /* WAS ENOUGH PAGES ALLOCATED OR ARE MORE NEEDED. */ /* ---------------------------------------------------------------- */ if (tafpPagesAllocated < tafpNoAllocRequested) { - ljam(); + jam(); } else { ndbrequire(tafpPagesAllocated == tafpNoAllocRequested); - ljam(); + jam(); return tafpNoAllocRequested; }//if }//while @@ -456,15 +454,15 @@ Uint32 Dbtup::leafPageRangeFull(Fragrecord* const regFragPtr, PageRangePtr curr parentPageRangePtr = currPageRangePtr; Uint32 tiprNoLevels = 1; while (true) { - ljam(); + jam(); parentPageRangePtr.i = parentPageRangePtr.p->parentPtr; if (parentPageRangePtr.i == RNIL) { - ljam(); + jam(); /* ---------------------------------------------------------------- */ /* WE HAVE REACHED THE ROOT. A NEW ROOT MUST BE ALLOCATED. 
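[Editor's note] releaseFragPages above tears the range tree down without recursion: each node remembers its parent, so the walk drains the current node's entries and climbs back up when the node is exhausted. A skeleton of that loop shape (the root's parent is null; page release is elided):

struct Node { Node* parent; int idx; Node* child[4]; };

void releaseTree(Node* root, void (*freeNode)(Node*))
{
  Node* n = root;
  while (n != nullptr) {
    if (n->idx >= 0) {                 // entries left in this node
      Node* c = n->child[n->idx];
      n->child[n->idx] = nullptr;
      if (c != nullptr) { n = c; continue; }   // descend first
      n->idx--;                        // empty slot: step to next entry
      continue;
    }
    Node* up = n->parent;              // node exhausted: free it and climb
    freeNode(n);
    n = up;
  }
}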
*/ /* ---------------------------------------------------------------- */ if (c_noOfFreePageRanges < tiprNoLevels) { - ljam(); + jam(); return RNIL; }//if PageRangePtr oldRootPRPtr; @@ -487,14 +485,14 @@ Uint32 Dbtup::leafPageRangeFull(Fragrecord* const regFragPtr, PageRangePtr curr foundPageRangePtr = newRootPRPtr; break; } else { - ljam(); + jam(); ptrCheckGuard(parentPageRangePtr, cnoOfPageRangeRec, pageRange); if (parentPageRangePtr.p->currentIndexPos < 3) { - ljam(); + jam(); if (c_noOfFreePageRanges < tiprNoLevels) { - ljam(); + jam(); return RNIL; }//if @@ -510,7 +508,7 @@ Uint32 Dbtup::leafPageRangeFull(Fragrecord* const regFragPtr, PageRangePtr curr foundPageRangePtr = parentPageRangePtr; break; } else { - ljam(); + jam(); ndbrequire(parentPageRangePtr.p->currentIndexPos == 3); /* ---------------------------------------------------------------- */ /* THE PAGE RANGE RECORD WAS FULL. FIND THE PARENT RECORD */ @@ -528,7 +526,7 @@ Uint32 Dbtup::leafPageRangeFull(Fragrecord* const regFragPtr, PageRangePtr curr PageRangePtr prevPageRangePtr; prevPageRangePtr = foundPageRangePtr; if (c_noOfFreePageRanges < tiprNoLevels) { - ljam(); + jam(); return RNIL; }//if /* ---------------------------------------------------------------- */ @@ -539,7 +537,7 @@ Uint32 Dbtup::leafPageRangeFull(Fragrecord* const regFragPtr, PageRangePtr curr /* ARE ALSO PROPERLY UPDATED ON THE PATH TO THE LEAF LEVEL. */ /* ---------------------------------------------------------------- */ while (true) { - ljam(); + jam(); seizePagerange(newPageRangePtr); tiprNoLevels--; ndbrequire(prevPageRangePtr.p->currentIndexPos < 4); @@ -547,13 +545,13 @@ Uint32 Dbtup::leafPageRangeFull(Fragrecord* const regFragPtr, PageRangePtr curr newPageRangePtr.p->parentPtr = prevPageRangePtr.i; newPageRangePtr.p->currentIndexPos = 0; if (tiprNoLevels > 0) { - ljam(); + jam(); newPageRangePtr.p->startRange[0] = regFragPtr->nextStartRange; newPageRangePtr.p->endRange[0] = regFragPtr->nextStartRange - 1; newPageRangePtr.p->type[0] = ZNON_LEAF; prevPageRangePtr = newPageRangePtr; } else { - ljam(); + jam(); break; }//if }//while @@ -588,16 +586,16 @@ void Dbtup::errorHandler(Uint32 errorCode) { switch (errorCode) { case 0: - ljam(); + jam(); break; case 1: - ljam(); + jam(); break; case 2: - ljam(); + jam(); break; default: - ljam(); + jam(); } ndbrequire(false); }//Dbtup::errorHandler() diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp index eb94e2c3473..b0b0cec6b76 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp @@ -15,6 +15,7 @@ #define DBTUP_C +#define DBTUP_ROUTINES_CPP #include "Dbtup.hpp" #include <RefConvert.hpp> #include <ndb_limits.h> @@ -23,9 +24,6 @@ #include "AttributeOffset.hpp" #include <AttributeHeader.hpp> -#define ljam() { jamLine(3000 + __LINE__); } -#define ljamEntry() { jamEntryLine(3000 + __LINE__); } - void Dbtup::setUpQueryRoutines(Tablerec *regTabPtr) { @@ -40,23 +38,23 @@ Dbtup::setUpQueryRoutines(Tablerec *regTabPtr) if (AttributeDescriptor::getArrayType(attrDescr) == NDB_ARRAYTYPE_FIXED){ if (!AttributeDescriptor::getNullable(attrDescr)) { if (AttributeDescriptor::getSize(attrDescr) == 0){ - ljam(); + jam(); regTabPtr->readFunctionArray[i] = &Dbtup::readBitsNotNULL; regTabPtr->updateFunctionArray[i] = &Dbtup::updateBitsNotNULL; } else if (AttributeDescriptor::getSizeInBytes(attrDescr) == 4) { - ljam(); + jam(); regTabPtr->readFunctionArray[i]= 
&Dbtup::readFixedSizeTHOneWordNotNULL; regTabPtr->updateFunctionArray[i]= &Dbtup::updateFixedSizeTHOneWordNotNULL; } else if (AttributeDescriptor::getSizeInBytes(attrDescr) == 8) { - ljam(); + jam(); regTabPtr->readFunctionArray[i]= &Dbtup::readFixedSizeTHTwoWordNotNULL; regTabPtr->updateFunctionArray[i]= &Dbtup::updateFixedSizeTHTwoWordNotNULL; } else { - ljam(); + jam(); regTabPtr->readFunctionArray[i]= &Dbtup::readFixedSizeTHManyWordNotNULL; regTabPtr->updateFunctionArray[i]= @@ -64,27 +62,27 @@ Dbtup::setUpQueryRoutines(Tablerec *regTabPtr) } // replace functions for char attribute if (AttributeOffset::getCharsetFlag(attrOffset)) { - ljam(); + jam(); regTabPtr->readFunctionArray[i] = &Dbtup::readFixedSizeTHManyWordNotNULL; regTabPtr->updateFunctionArray[i] = &Dbtup::updateFixedSizeTHManyWordNotNULL; } } else { if (AttributeDescriptor::getSize(attrDescr) == 0){ - ljam(); + jam(); regTabPtr->readFunctionArray[i] = &Dbtup::readBitsNULLable; regTabPtr->updateFunctionArray[i] = &Dbtup::updateBitsNULLable; } else if (AttributeDescriptor::getSizeInBytes(attrDescr) == 4){ - ljam(); + jam(); regTabPtr->readFunctionArray[i] = &Dbtup::readFixedSizeTHOneWordNULLable; regTabPtr->updateFunctionArray[i] = &Dbtup::updateFixedSizeTHManyWordNULLable; } else if (AttributeDescriptor::getSizeInBytes(attrDescr) == 8) { - ljam(); + jam(); regTabPtr->readFunctionArray[i]= &Dbtup::readFixedSizeTHTwoWordNULLable; regTabPtr->updateFunctionArray[i]= &Dbtup::updateFixedSizeTHManyWordNULLable; } else { - ljam(); + jam(); regTabPtr->readFunctionArray[i]= &Dbtup::readFixedSizeTHManyWordNULLable; regTabPtr->updateFunctionArray[i]= @@ -92,7 +90,7 @@ Dbtup::setUpQueryRoutines(Tablerec *regTabPtr) } // replace functions for char attribute if (AttributeOffset::getCharsetFlag(attrOffset)) { - ljam(); + jam(); regTabPtr->readFunctionArray[i] = &Dbtup::readFixedSizeTHManyWordNULLable; regTabPtr->updateFunctionArray[i] = &Dbtup::updateFixedSizeTHManyWordNULLable; } @@ -144,7 +142,7 @@ Dbtup::setUpQueryRoutines(Tablerec *regTabPtr) } } else { if (AttributeDescriptor::getArrayType(attrDescr) == NDB_ARRAYTYPE_FIXED){ - ljam(); + jam(); regTabPtr->readFunctionArray[i]= &Dbtup::readDynFixedSize; regTabPtr->updateFunctionArray[i]= &Dbtup::updateDynFixedSize; } else { @@ -204,7 +202,7 @@ int Dbtup::readAttributes(KeyReqStruct *req_struct, inBufIndex++; attributeId= ahIn.getAttributeId(); descr_index= attributeId << ZAD_LOG_SIZE; - ljam(); + jam(); AttributeHeader::init(&outBuffer[tmpAttrBufIndex], attributeId, 0); ahOut= (AttributeHeader*)&outBuffer[tmpAttrBufIndex]; @@ -223,7 +221,19 @@ int Dbtup::readAttributes(KeyReqStruct *req_struct, return -1; } } else if(attributeId & AttributeHeader::PSEUDO) { - ljam(); + if (attributeId == AttributeHeader::ANY_VALUE) + { + jam(); + Uint32 RlogSize = req_struct->log_size; + operPtr.p->m_any_value = inBuffer[inBufIndex]; + * (clogMemBuffer + RlogSize) = inBuffer[inBufIndex - 1]; + * (clogMemBuffer + RlogSize + 1) = inBuffer[inBufIndex]; + inBufIndex++; + req_struct->out_buf_index = tmpAttrBufIndex; + req_struct->log_size = RlogSize + 2; + continue; + } + jam(); Uint32 sz= read_pseudo(attributeId, req_struct, outBuffer+tmpAttrBufIndex+1); @@ -252,13 +262,13 @@ Dbtup::readFixedSizeTHOneWordNotNULL(Uint32* outBuffer, ndbrequire(readOffset < req_struct->check_offset[MM]); if (newIndexBuf <= maxRead) { - ljam(); + jam(); outBuffer[indexBuf]= wordRead; ahOut->setDataSize(1); req_struct->out_buf_index= newIndexBuf; return true; } else { - ljam(); + jam(); terrorCode= ZTRY_TO_READ_TOO_MUCH_ERROR; 
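[Editor's note] setUpQueryRoutines above builds per-attribute dispatch tables once, at table definition time, so the per-row read/update path is a single indirect call instead of re-deciding size, nullability, and charset on every access. A small sketch of the pattern with member-function pointers; names are illustrative:

#include <cstdint>
#include <vector>

struct Table;
typedef bool (Table::*ReadFn)(uint32_t* out, uint32_t attrDes);

struct Table {
  std::vector<ReadFn> readFn;          // one slot per attribute

  bool readOneWord (uint32_t*, uint32_t) { /* fast 4-byte path */ return true; }
  bool readManyWord(uint32_t*, uint32_t) { /* general path     */ return true; }

  void setUp(const std::vector<uint32_t>& sizeInBytes) {
    readFn.resize(sizeInBytes.size());
    for (size_t i = 0; i < sizeInBytes.size(); i++)
      readFn[i] = (sizeInBytes[i] == 4) ? &Table::readOneWord
                                        : &Table::readManyWord;
  }
  bool read(size_t attr, uint32_t* out, uint32_t des) {
    return (this->*readFn[attr])(out, des);  // one indirect call per value
  }
};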
return false; } @@ -280,14 +290,14 @@ Dbtup::readFixedSizeTHTwoWordNotNULL(Uint32* outBuffer, ndbrequire(readOffset + 1 < req_struct->check_offset[MM]); if (newIndexBuf <= maxRead) { - ljam(); + jam(); ahOut->setDataSize(2); outBuffer[indexBuf]= wordReadFirst; outBuffer[indexBuf + 1]= wordReadSecond; req_struct->out_buf_index= newIndexBuf; return true; } else { - ljam(); + jam(); terrorCode= ZTRY_TO_READ_TOO_MUCH_ERROR; return false; } @@ -311,7 +321,7 @@ Dbtup::readFixedSizeTHManyWordNotNULL(Uint32* outBuffer, if (! charsetFlag || ! req_struct->xfrm_flag) { Uint32 newIndexBuf = indexBuf + attrNoOfWords; if (newIndexBuf <= maxRead) { - ljam(); + jam(); ahOut->setByteSize(AttributeDescriptor::getSizeInBytes(attrDescriptor)); MEMCOPY_NO_WORDS(&outBuffer[indexBuf], &tuple_header[readOffset], @@ -319,11 +329,11 @@ Dbtup::readFixedSizeTHManyWordNotNULL(Uint32* outBuffer, req_struct->out_buf_index = newIndexBuf; return true; } else { - ljam(); + jam(); terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR; }//if } else { - ljam(); + jam(); Tablerec* regTabPtr = tabptr.p; Uint32 srcBytes = AttributeDescriptor::getSizeInBytes(attrDescriptor); uchar* dstPtr = (uchar*)&outBuffer[indexBuf]; @@ -340,8 +350,7 @@ Dbtup::readFixedSizeTHManyWordNotNULL(Uint32* outBuffer, Uint32 dstLen = xmul * (srcBytes - lb); Uint32 maxIndexBuf = indexBuf + (dstLen >> 2); if (maxIndexBuf <= maxRead && ok) { - ljam(); - const char* ssrcPtr = (const char*)srcPtr; + jam(); int n = NdbSqlUtil::strnxfrm_bug7284(cs, dstPtr, dstLen, srcPtr + lb, len); ndbrequire(n != -1); int m = n; @@ -354,7 +363,7 @@ Dbtup::readFixedSizeTHManyWordNotNULL(Uint32* outBuffer, req_struct->out_buf_index = newIndexBuf; return true; } else { - ljam(); + jam(); terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR; } } @@ -368,13 +377,13 @@ Dbtup::readFixedSizeTHOneWordNULLable(Uint32* outBuffer, Uint32 attrDes2) { if (!nullFlagCheck(req_struct, attrDes2)) { - ljam(); + jam(); return readFixedSizeTHOneWordNotNULL(outBuffer, req_struct, ahOut, attrDes2); } else { - ljam(); + jam(); ahOut->setNULL(); return true; } @@ -387,13 +396,13 @@ Dbtup::readFixedSizeTHTwoWordNULLable(Uint32* outBuffer, Uint32 attrDes2) { if (!nullFlagCheck(req_struct, attrDes2)) { - ljam(); + jam(); return readFixedSizeTHTwoWordNotNULL(outBuffer, req_struct, ahOut, attrDes2); } else { - ljam(); + jam(); ahOut->setNULL(); return true; } @@ -406,13 +415,13 @@ Dbtup::readFixedSizeTHManyWordNULLable(Uint32* outBuffer, Uint32 attrDes2) { if (!nullFlagCheck(req_struct, attrDes2)) { - ljam(); + jam(); return readFixedSizeTHManyWordNotNULL(outBuffer, req_struct, ahOut, attrDes2); } else { - ljam(); + jam(); ahOut->setNULL(); return true; } @@ -424,9 +433,9 @@ Dbtup::readFixedSizeTHZeroWordNULLable(Uint32* outBuffer, AttributeHeader* ahOut, Uint32 attrDes2) { - ljam(); + jam(); if (nullFlagCheck(req_struct, attrDes2)) { - ljam(); + jam(); ahOut->setNULL(); } return true; @@ -478,7 +487,7 @@ Dbtup::readVarSizeNotNULL(Uint32* out_buffer, if (! charsetFlag || ! 
req_struct->xfrm_flag) { if (new_index <= max_read) { - ljam(); + jam(); ah_out->setByteSize(vsize_in_bytes); out_buffer[index_buf + (vsize_in_bytes >> 2)] = 0; memcpy(out_buffer+index_buf, @@ -490,7 +499,7 @@ Dbtup::readVarSizeNotNULL(Uint32* out_buffer, } else { - ljam(); + jam(); Tablerec* regTabPtr = tabptr.p; Uint32 maxBytes = AttributeDescriptor::getSizeInBytes(attr_descriptor); Uint32 srcBytes = vsize_in_bytes; @@ -509,8 +518,7 @@ Dbtup::readVarSizeNotNULL(Uint32* out_buffer, Uint32 dstLen = xmul * (maxBytes - lb); Uint32 maxIndexBuf = index_buf + (dstLen >> 2); if (maxIndexBuf <= max_read && ok) { - ljam(); - const char* ssrcPtr = (const char*)srcPtr; + jam(); int n = NdbSqlUtil::strnxfrm_bug7284(cs, dstPtr, dstLen, srcPtr + lb, len); ndbrequire(n != -1); int m = n; @@ -524,7 +532,7 @@ Dbtup::readVarSizeNotNULL(Uint32* out_buffer, return true; } } - ljam(); + jam(); terrorCode= ZTRY_TO_READ_TOO_MUCH_ERROR; return false; } @@ -536,13 +544,13 @@ Dbtup::readVarSizeNULLable(Uint32* outBuffer, Uint32 attrDes2) { if (!nullFlagCheck(req_struct, attrDes2)) { - ljam(); + jam(); return readVarSizeNotNULL(outBuffer, req_struct, ahOut, attrDes2); } else { - ljam(); + jam(); ahOut->setNULL(); return true; } @@ -554,7 +562,7 @@ Dbtup::readDynFixedSize(Uint32* outBuffer, AttributeHeader* ahOut, Uint32 attrDes2) { - ljam(); + jam(); terrorCode= ZVAR_SIZED_NOT_SUPPORTED; return false; } @@ -565,7 +573,7 @@ Dbtup::readDynVarSize(Uint32* outBuffer, AttributeHeader* ahOut, Uint32 attrDes2) { - ljam(); + jam(); terrorCode= ZVAR_SIZED_NOT_SUPPORTED; return false; }//Dbtup::readDynBigVarSize() @@ -588,7 +596,7 @@ Dbtup::readDiskFixedSizeNotNULL(Uint32* outBuffer, if (! charsetFlag || ! req_struct->xfrm_flag) { Uint32 newIndexBuf = indexBuf + attrNoOfWords; if (newIndexBuf <= maxRead) { - ljam(); + jam(); ahOut->setByteSize(AttributeDescriptor::getSizeInBytes(attrDescriptor)); MEMCOPY_NO_WORDS(&outBuffer[indexBuf], &tuple_header[readOffset], @@ -596,11 +604,11 @@ Dbtup::readDiskFixedSizeNotNULL(Uint32* outBuffer, req_struct->out_buf_index = newIndexBuf; return true; } else { - ljam(); + jam(); terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR; }//if } else { - ljam(); + jam(); Tablerec* regTabPtr = tabptr.p; Uint32 srcBytes = AttributeDescriptor::getSizeInBytes(attrDescriptor); uchar* dstPtr = (uchar*)&outBuffer[indexBuf]; @@ -617,8 +625,7 @@ Dbtup::readDiskFixedSizeNotNULL(Uint32* outBuffer, Uint32 dstLen = xmul * (srcBytes - lb); Uint32 maxIndexBuf = indexBuf + (dstLen >> 2); if (maxIndexBuf <= maxRead && ok) { - ljam(); - const char* ssrcPtr = (const char*)srcPtr; + jam(); int n = NdbSqlUtil::strnxfrm_bug7284(cs, dstPtr, dstLen, srcPtr + lb, len); ndbrequire(n != -1); int m = n; @@ -631,7 +638,7 @@ Dbtup::readDiskFixedSizeNotNULL(Uint32* outBuffer, req_struct->out_buf_index = newIndexBuf; return true; } else { - ljam(); + jam(); terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR; } } @@ -645,13 +652,13 @@ Dbtup::readDiskFixedSizeNULLable(Uint32* outBuffer, Uint32 attrDes2) { if (!disk_nullFlagCheck(req_struct, attrDes2)) { - ljam(); + jam(); return readDiskFixedSizeNotNULL(outBuffer, req_struct, ahOut, attrDes2); } else { - ljam(); + jam(); ahOut->setNULL(); return true; } @@ -680,7 +687,7 @@ Dbtup::readDiskVarSizeNotNULL(Uint32* out_buffer, ndbrequire(vsize_in_words <= max_var_size); if (new_index <= max_read) { - ljam(); + jam(); ah_out->setByteSize(vsize_in_bytes); memcpy(out_buffer+index_buf, req_struct->m_var_data[DD].m_data_ptr+var_attr_pos, @@ -688,7 +695,7 @@ Dbtup::readDiskVarSizeNotNULL(Uint32* out_buffer, 
req_struct->out_buf_index= new_index; return true; } else { - ljam(); + jam(); terrorCode= ZTRY_TO_READ_TOO_MUCH_ERROR; return false; } @@ -701,13 +708,13 @@ Dbtup::readDiskVarSizeNULLable(Uint32* outBuffer, Uint32 attrDes2) { if (!disk_nullFlagCheck(req_struct, attrDes2)) { - ljam(); + jam(); return readDiskVarSizeNotNULL(outBuffer, req_struct, ahOut, attrDes2); } else { - ljam(); + jam(); ahOut->setNULL(); return true; } @@ -749,13 +756,13 @@ int Dbtup::updateAttributes(KeyReqStruct *req_struct, if (checkUpdateOfPrimaryKey(req_struct, &inBuffer[inBufIndex], regTabPtr)) { - ljam(); + jam(); terrorCode= ZTRY_UPDATE_PRIMARY_KEY; return -1; } } UpdateFunction f= regTabPtr->updateFunctionArray[attributeId]; - ljam(); + jam(); req_struct->attr_descriptor= attrDescriptor; req_struct->changeMask.set(attributeId); if (attributeId >= 64) { @@ -771,13 +778,13 @@ int Dbtup::updateAttributes(KeyReqStruct *req_struct, inBufIndex= req_struct->in_buf_index; continue; } else { - ljam(); + jam(); return -1; } } else if(attributeId == AttributeHeader::DISK_REF) { - ljam(); + jam(); Uint32 sz= ahIn.getDataSize(); ndbrequire(sz == 2); req_struct->m_tuple_ptr->m_header_bits |= Tuple_header::DISK_PART; @@ -786,9 +793,18 @@ int Dbtup::updateAttributes(KeyReqStruct *req_struct, inBufIndex += 1 + sz; req_struct->in_buf_index = inBufIndex; } + else if(attributeId == AttributeHeader::ANY_VALUE) + { + jam(); + Uint32 sz= ahIn.getDataSize(); + ndbrequire(sz == 1); + regOperPtr->m_any_value = * (inBuffer + inBufIndex + 1); + inBufIndex += 1 + sz; + req_struct->in_buf_index = inBufIndex; + } else { - ljam(); + jam(); terrorCode= ZATTRIBUTE_ID_ERROR; return -1; } @@ -842,13 +858,13 @@ Dbtup::checkUpdateOfPrimaryKey(KeyReqStruct* req_struct, ndbrequire(req_struct->out_buf_index == ahOut->getDataSize()); if (ahIn.getDataSize() != ahOut->getDataSize()) { - ljam(); + jam(); return true; } if (memcmp(&keyReadBuffer[0], &updateBuffer[1], req_struct->out_buf_index << 2) != 0) { - ljam(); + jam(); return true; } return false; @@ -871,17 +887,17 @@ Dbtup::updateFixedSizeTHOneWordNotNULL(Uint32* inBuffer, if (newIndex <= inBufLen) { Uint32 updateWord= inBuffer[indexBuf + 1]; if (!nullIndicator) { - ljam(); + jam(); req_struct->in_buf_index= newIndex; tuple_header[updateOffset]= updateWord; return true; } else { - ljam(); + jam(); terrorCode= ZNOT_NULL_ATTR; return false; } } else { - ljam(); + jam(); terrorCode= ZAI_INCONSISTENCY_ERROR; return false; } @@ -906,18 +922,18 @@ Dbtup::updateFixedSizeTHTwoWordNotNULL(Uint32* inBuffer, Uint32 updateWord1= inBuffer[indexBuf + 1]; Uint32 updateWord2= inBuffer[indexBuf + 2]; if (!nullIndicator) { - ljam(); + jam(); req_struct->in_buf_index= newIndex; tuple_header[updateOffset]= updateWord1; tuple_header[updateOffset + 1]= updateWord2; return true; } else { - ljam(); + jam(); terrorCode= ZNOT_NULL_ATTR; return false; } } else { - ljam(); + jam(); terrorCode= ZAI_INCONSISTENCY_ERROR; return false; } @@ -943,9 +959,9 @@ Dbtup::updateFixedSizeTHManyWordNotNULL(Uint32* inBuffer, if (newIndex <= inBufLen) { if (!nullIndicator) { - ljam(); + jam(); if (charsetFlag) { - ljam(); + jam(); Tablerec* regTabPtr = tabptr.p; Uint32 typeId = AttributeDescriptor::getType(attrDescriptor); Uint32 bytes = AttributeDescriptor::getSizeInBytes(attrDescriptor); @@ -957,14 +973,14 @@ Dbtup::updateFixedSizeTHManyWordNotNULL(Uint32* inBuffer, const char* ssrc = (const char*)&inBuffer[indexBuf + 1]; Uint32 lb, len; if (! 
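[Editor's note] The new ANY_VALUE branch above consumes one pseudo-attribute from the update stream: an AttributeHeader word (id plus size, here required to be 1) followed by the value word, which is stored on the operation record rather than in the tuple. A parsing sketch with a deliberately simplified header layout; the real AttributeHeader encoding differs:

#include <cstdint>

struct Op { uint32_t anyValue; };

// Returns words consumed, or 0 if the entry is malformed.
unsigned consumeAnyValue(const uint32_t* buf, unsigned idx, Op& op)
{
  const uint32_t header = buf[idx];
  const uint32_t sz     = header & 0xFFFFu;  // size field (layout assumed)
  if (sz != 1)
    return 0;                    // the real code does ndbrequire(sz == 1)
  op.anyValue = buf[idx + 1];    // kept per-operation, not per-row
  return 1 + sz;                 // header word plus one data word
}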
NdbSqlUtil::get_var_length(typeId, ssrc, bytes, lb, len)) { - ljam(); + jam(); terrorCode = ZINVALID_CHAR_FORMAT; return false; } // fast fix bug#7340 if (typeId != NDB_TYPE_TEXT && (*cs->cset->well_formed_len)(cs, ssrc + lb, ssrc + lb + len, ZNIL, ¬_used) != len) { - ljam(); + jam(); terrorCode = ZINVALID_CHAR_FORMAT; return false; } @@ -976,12 +992,12 @@ Dbtup::updateFixedSizeTHManyWordNotNULL(Uint32* inBuffer, return true; } else { - ljam(); + jam(); terrorCode= ZNOT_NULL_ATTR; return false; } } else { - ljam(); + jam(); terrorCode= ZAI_INCONSISTENCY_ERROR; return false; } @@ -999,7 +1015,7 @@ Dbtup::updateFixedSizeTHManyWordNULLable(Uint32* inBuffer, Uint32 *bits= req_struct->m_tuple_ptr->get_null_bits(regTabPtr); if (!nullIndicator) { - ljam(); + jam(); BitmaskImpl::clear(regTabPtr->m_offsets[MM].m_null_words, bits, pos); return updateFixedSizeTHManyWordNotNULL(inBuffer, req_struct, @@ -1008,11 +1024,11 @@ Dbtup::updateFixedSizeTHManyWordNULLable(Uint32* inBuffer, Uint32 newIndex= req_struct->in_buf_index + 1; if (newIndex <= req_struct->in_buf_len) { BitmaskImpl::set(regTabPtr->m_offsets[MM].m_null_words, bits, pos); - ljam(); + jam(); req_struct->in_buf_index= newIndex; return true; } else { - ljam(); + jam(); terrorCode= ZAI_INCONSISTENCY_ERROR; return false; } @@ -1025,7 +1041,7 @@ Dbtup::updateVarSizeNotNULL(Uint32* in_buffer, Uint32 attr_des2) { Uint32 attr_descriptor, index_buf, in_buf_len, var_index, null_ind; - Uint32 vsize_in_bytes, vsize_in_words, new_index, max_var_size; + Uint32 vsize_in_words, new_index, max_var_size; Uint32 var_attr_pos; char *var_data_start; Uint16 *vpos_array; @@ -1046,7 +1062,7 @@ Dbtup::updateVarSizeNotNULL(Uint32* in_buffer, if (new_index <= in_buf_len && vsize_in_words <= max_var_size) { if (!null_ind) { - ljam(); + jam(); var_attr_pos= vpos_array[var_index]; var_data_start= req_struct->m_var_data[MM].m_data_ptr; vpos_array[var_index+idx]= var_attr_pos+size_in_bytes; @@ -1057,12 +1073,12 @@ Dbtup::updateVarSizeNotNULL(Uint32* in_buffer, size_in_bytes); return true; } else { - ljam(); + jam(); terrorCode= ZNOT_NULL_ATTR; return false; } } else { - ljam(); + jam(); terrorCode= ZAI_INCONSISTENCY_ERROR; return false; } @@ -1082,7 +1098,7 @@ Dbtup::updateVarSizeNULLable(Uint32* inBuffer, Uint32 idx= req_struct->m_var_data[MM].m_var_len_offset; if (!nullIndicator) { - ljam(); + jam(); BitmaskImpl::clear(regTabPtr->m_offsets[MM].m_null_words, bits, pos); return updateVarSizeNotNULL(inBuffer, req_struct, @@ -1092,13 +1108,13 @@ Dbtup::updateVarSizeNULLable(Uint32* inBuffer, Uint32 var_index= AttributeOffset::getOffset(attrDes2); Uint32 var_pos= req_struct->var_pos_array[var_index]; if (newIndex <= req_struct->in_buf_len) { - ljam(); + jam(); BitmaskImpl::set(regTabPtr->m_offsets[MM].m_null_words, bits, pos); req_struct->var_pos_array[var_index+idx]= var_pos; req_struct->in_buf_index= newIndex; return true; } else { - ljam(); + jam(); terrorCode= ZAI_INCONSISTENCY_ERROR; return false; } @@ -1110,7 +1126,7 @@ Dbtup::updateDynFixedSize(Uint32* inBuffer, KeyReqStruct *req_struct, Uint32 attrDes2) { - ljam(); + jam(); terrorCode= ZVAR_SIZED_NOT_SUPPORTED; return false; } @@ -1120,7 +1136,7 @@ Dbtup::updateDynVarSize(Uint32* inBuffer, KeyReqStruct *req_struct, Uint32 attrDes2) { - ljam(); + jam(); terrorCode= ZVAR_SIZED_NOT_SUPPORTED; return false; } @@ -1222,7 +1238,7 @@ Dbtup::readBitsNotNULL(Uint32* outBuffer, Uint32 maxRead = req_struct->max_read; Uint32 *bits= req_struct->m_tuple_ptr->get_null_bits(regTabPtr); if (newIndexBuf <= maxRead) { - ljam(); + 
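[Editor's note] The NULLable update wrappers above share one shape: a non-null value clears the attribute's bit in the tuple's null bitmap and falls through to the not-NULL worker, while a null just sets the bit and consumes only the header word. A condensed sketch over a flat bitmap:

#include <cstdint>

static inline void setBit  (uint32_t* b, uint32_t pos) { b[pos >> 5] |=  (1u << (pos & 31)); }
static inline void clearBit(uint32_t* b, uint32_t pos) { b[pos >> 5] &= ~(1u << (pos & 31)); }

bool updateNullable(uint32_t* bits, uint32_t pos, bool isNull,
                    bool (*updateNotNull)())
{
  if (!isNull) {
    clearBit(bits, pos);    // value present: bit off, then write the data
    return updateNotNull();
  }
  setBit(bits, pos);        // NULL: bit on, no data words to consume
  return true;
}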
jam(); ahOut->setDataSize((bitCount + 31) >> 5); req_struct->out_buf_index = newIndexBuf; @@ -1231,7 +1247,7 @@ Dbtup::readBitsNotNULL(Uint32* outBuffer, return true; } else { - ljam(); + jam(); terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR; return false; }//if @@ -1255,20 +1271,20 @@ Dbtup::readBitsNULLable(Uint32* outBuffer, if(BitmaskImpl::get(regTabPtr->m_offsets[MM].m_null_words, bits, pos)) { - ljam(); + jam(); ahOut->setNULL(); return true; } if (newIndexBuf <= maxRead) { - ljam(); + jam(); ahOut->setDataSize((bitCount + 31) >> 5); req_struct->out_buf_index = newIndexBuf; BitmaskImpl::getField(regTabPtr->m_offsets[MM].m_null_words, bits, pos+1, bitCount, outBuffer+indexBuf); return true; } else { - ljam(); + jam(); terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR; return false; }//if @@ -1297,12 +1313,12 @@ Dbtup::updateBitsNotNULL(Uint32* inBuffer, req_struct->in_buf_index = newIndex; return true; } else { - ljam(); + jam(); terrorCode = ZNOT_NULL_ATTR; return false; }//if } else { - ljam(); + jam(); terrorCode = ZAI_INCONSISTENCY_ERROR; return false; }//if @@ -1335,13 +1351,13 @@ Dbtup::updateBitsNULLable(Uint32* inBuffer, Uint32 newIndex = indexBuf + 1; if (newIndex <= req_struct->in_buf_len) { - ljam(); + jam(); BitmaskImpl::set(regTabPtr->m_offsets[MM].m_null_words, bits, pos); req_struct->in_buf_index = newIndex; return true; } else { - ljam(); + jam(); terrorCode = ZAI_INCONSISTENCY_ERROR; return false; }//if @@ -1368,9 +1384,9 @@ Dbtup::updateDiskFixedSizeNotNULL(Uint32* inBuffer, if (newIndex <= inBufLen) { if (!nullIndicator) { - ljam(); + jam(); if (charsetFlag) { - ljam(); + jam(); Tablerec* regTabPtr = tabptr.p; Uint32 typeId = AttributeDescriptor::getType(attrDescriptor); Uint32 bytes = AttributeDescriptor::getSizeInBytes(attrDescriptor); @@ -1382,14 +1398,14 @@ Dbtup::updateDiskFixedSizeNotNULL(Uint32* inBuffer, const char* ssrc = (const char*)&inBuffer[indexBuf + 1]; Uint32 lb, len; if (! 
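[Editor's note] The bit readers above report their size as (bitCount + 31) >> 5: the number of 32-bit words needed to hold bitCount bits, rounded up. The equivalent helper, for reference:

#include <cstdint>

static inline uint32_t bitsToWords(uint32_t bitCount)
{
  return (bitCount + 31) >> 5;   // == ceil(bitCount / 32)
}
// bitsToWords(1) == 1, bitsToWords(32) == 1, bitsToWords(33) == 2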
NdbSqlUtil::get_var_length(typeId, ssrc, bytes, lb, len)) { - ljam(); + jam(); terrorCode = ZINVALID_CHAR_FORMAT; return false; } // fast fix bug#7340 if (typeId != NDB_TYPE_TEXT && (*cs->cset->well_formed_len)(cs, ssrc + lb, ssrc + lb + len, ZNIL, ¬_used) != len) { - ljam(); + jam(); terrorCode = ZINVALID_CHAR_FORMAT; return false; } @@ -1400,12 +1416,12 @@ Dbtup::updateDiskFixedSizeNotNULL(Uint32* inBuffer, noOfWords); return true; } else { - ljam(); + jam(); terrorCode= ZNOT_NULL_ATTR; return false; } } else { - ljam(); + jam(); terrorCode= ZAI_INCONSISTENCY_ERROR; return false; } @@ -1423,7 +1439,7 @@ Dbtup::updateDiskFixedSizeNULLable(Uint32* inBuffer, Uint32 *bits= req_struct->m_disk_ptr->get_null_bits(regTabPtr, DD); if (!nullIndicator) { - ljam(); + jam(); BitmaskImpl::clear(regTabPtr->m_offsets[DD].m_null_words, bits, pos); return updateDiskFixedSizeNotNULL(inBuffer, req_struct, @@ -1432,11 +1448,11 @@ Dbtup::updateDiskFixedSizeNULLable(Uint32* inBuffer, Uint32 newIndex= req_struct->in_buf_index + 1; if (newIndex <= req_struct->in_buf_len) { BitmaskImpl::set(regTabPtr->m_offsets[DD].m_null_words, bits, pos); - ljam(); + jam(); req_struct->in_buf_index= newIndex; return true; } else { - ljam(); + jam(); terrorCode= ZAI_INCONSISTENCY_ERROR; return false; } @@ -1449,7 +1465,7 @@ Dbtup::updateDiskVarSizeNotNULL(Uint32* in_buffer, Uint32 attr_des2) { Uint32 attr_descriptor, index_buf, in_buf_len, var_index, null_ind; - Uint32 vsize_in_bytes, vsize_in_words, new_index, max_var_size; + Uint32 vsize_in_words, new_index, max_var_size; Uint32 var_attr_pos; char *var_data_start; Uint16 *vpos_array; @@ -1470,7 +1486,7 @@ Dbtup::updateDiskVarSizeNotNULL(Uint32* in_buffer, if (new_index <= in_buf_len && vsize_in_words <= max_var_size) { if (!null_ind) { - ljam(); + jam(); var_attr_pos= vpos_array[var_index]; var_data_start= req_struct->m_var_data[DD].m_data_ptr; vpos_array[var_index+idx]= var_attr_pos+size_in_bytes; @@ -1481,12 +1497,12 @@ Dbtup::updateDiskVarSizeNotNULL(Uint32* in_buffer, size_in_bytes); return true; } else { - ljam(); + jam(); terrorCode= ZNOT_NULL_ATTR; return false; } } else { - ljam(); + jam(); terrorCode= ZAI_INCONSISTENCY_ERROR; return false; } @@ -1506,7 +1522,7 @@ Dbtup::updateDiskVarSizeNULLable(Uint32* inBuffer, Uint32 idx= req_struct->m_var_data[DD].m_var_len_offset; if (!nullIndicator) { - ljam(); + jam(); BitmaskImpl::clear(regTabPtr->m_offsets[DD].m_null_words, bits, pos); return updateDiskVarSizeNotNULL(inBuffer, req_struct, @@ -1516,13 +1532,13 @@ Dbtup::updateDiskVarSizeNULLable(Uint32* inBuffer, Uint32 var_index= AttributeOffset::getOffset(attrDes2); Uint32 var_pos= req_struct->var_pos_array[var_index]; if (newIndex <= req_struct->in_buf_len) { - ljam(); + jam(); BitmaskImpl::set(regTabPtr->m_offsets[DD].m_null_words, bits, pos); req_struct->var_pos_array[var_index+idx]= var_pos; req_struct->in_buf_index= newIndex; return true; } else { - ljam(); + jam(); terrorCode= ZAI_INCONSISTENCY_ERROR; return false; } @@ -1544,7 +1560,7 @@ Dbtup::readDiskBitsNotNULL(Uint32* outBuffer, Uint32 maxRead = req_struct->max_read; Uint32 *bits= req_struct->m_disk_ptr->get_null_bits(regTabPtr, DD); if (newIndexBuf <= maxRead) { - ljam(); + jam(); ahOut->setDataSize((bitCount + 31) >> 5); req_struct->out_buf_index = newIndexBuf; @@ -1553,7 +1569,7 @@ Dbtup::readDiskBitsNotNULL(Uint32* outBuffer, return true; } else { - ljam(); + jam(); terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR; return false; }//if @@ -1577,20 +1593,20 @@ Dbtup::readDiskBitsNULLable(Uint32* outBuffer, 
if(BitmaskImpl::get(regTabPtr->m_offsets[DD].m_null_words, bits, pos)) { - ljam(); + jam(); ahOut->setNULL(); return true; } if (newIndexBuf <= maxRead) { - ljam(); + jam(); ahOut->setDataSize((bitCount + 31) >> 5); req_struct->out_buf_index = newIndexBuf; BitmaskImpl::getField(regTabPtr->m_offsets[DD].m_null_words, bits, pos+1, bitCount, outBuffer+indexBuf); return true; } else { - ljam(); + jam(); terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR; return false; }//if @@ -1619,12 +1635,12 @@ Dbtup::updateDiskBitsNotNULL(Uint32* inBuffer, req_struct->in_buf_index = newIndex; return true; } else { - ljam(); + jam(); terrorCode = ZNOT_NULL_ATTR; return false; }//if } else { - ljam(); + jam(); terrorCode = ZAI_INCONSISTENCY_ERROR; return false; }//if @@ -1657,13 +1673,13 @@ Dbtup::updateDiskBitsNULLable(Uint32* inBuffer, Uint32 newIndex = indexBuf + 1; if (newIndex <= req_struct->in_buf_len) { - ljam(); + jam(); BitmaskImpl::set(regTabPtr->m_offsets[DD].m_null_words, bits, pos); req_struct->in_buf_index = newIndex; return true; } else { - ljam(); + jam(); terrorCode = ZAI_INCONSISTENCY_ERROR; return false; }//if diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupScan.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupScan.cpp index eecbee4c058..a5f7d4be0a9 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupScan.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupScan.cpp @@ -14,6 +14,7 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #define DBTUP_C +#define DBTUP_SCAN_CPP #include "Dbtup.hpp" #include <signaldata/AccScan.hpp> #include <signaldata/NextScan.hpp> @@ -892,7 +893,6 @@ Dbtup::scanNext(Signal* signal, ScanOpPtr scanPtr) { ndbassert(bits & ScanOp::SCAN_NR); Local_key& key_mm = pos.m_key_mm; - Fix_page* page = (Fix_page*)pos.m_page; if (! 
(bits & ScanOp::SCAN_DD)) { key_mm = pos.m_key; // caller has already set pos.m_get to next tuple diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupStoredProcDef.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupStoredProcDef.cpp index b2c42418418..12d5f8aba38 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupStoredProcDef.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupStoredProcDef.cpp @@ -15,14 +15,12 @@ #define DBTUP_C +#define DBTUP_STORE_PROC_DEF_CPP #include "Dbtup.hpp" #include <RefConvert.hpp> #include <ndb_limits.h> #include <pc.hpp> -#define ljam() { jamLine(18000 + __LINE__); } -#define ljamEntry() { jamEntryLine(18000 + __LINE__); } - /* ---------------------------------------------------------------- */ /* ---------------------------------------------------------------- */ /* ------------ADD/DROP STORED PROCEDURE MODULE ------------------- */ @@ -32,7 +30,7 @@ void Dbtup::execSTORED_PROCREQ(Signal* signal) { OperationrecPtr regOperPtr; TablerecPtr regTabPtr; - ljamEntry(); + jamEntry(); regOperPtr.i = signal->theData[0]; c_operation_pool.getPtr(regOperPtr); regTabPtr.i = signal->theData[1]; @@ -46,17 +44,17 @@ void Dbtup::execSTORED_PROCREQ(Signal* signal) ndbrequire(regTabPtr.p->tableStatus == DEFINED); switch (requestInfo) { case ZSCAN_PROCEDURE: - ljam(); + jam(); scanProcedure(signal, regOperPtr.p, signal->theData[4]); break; case ZCOPY_PROCEDURE: - ljam(); + jam(); copyProcedure(signal, regTabPtr, regOperPtr.p); break; case ZSTORED_PROCEDURE_DELETE: - ljam(); + jam(); deleteScanProcedure(signal, regOperPtr.p); break; default: @@ -124,14 +122,14 @@ void Dbtup::copyProcedure(Signal* signal, AttributeHeader::init(&signal->theData[length + 1], Ti, 0); length++; if (length == 24) { - ljam(); + jam(); ndbrequire(storedProcedureAttrInfo(signal, regOperPtr, signal->theData+1, length, true)); length = 0; }//if }//for if (length != 0) { - ljam(); + jam(); ndbrequire(storedProcedureAttrInfo(signal, regOperPtr, signal->theData+1, length, true)); }//if @@ -155,7 +153,7 @@ bool Dbtup::storedProcedureAttrInfo(Signal* signal, ndbrequire(regOperPtr->currentAttrinbufLen <= regOperPtr->attrinbufLen); if ((RnoFree > MIN_ATTRBUF) || (copyProcedure)) { - ljam(); + jam(); regAttrPtr.i = cfirstfreeAttrbufrec; ptrCheckGuard(regAttrPtr, cnoOfAttrbufrec, attrbufrec); regAttrPtr.p->attrbuf[ZBUF_DATA_LEN] = 0; @@ -163,18 +161,18 @@ bool Dbtup::storedProcedureAttrInfo(Signal* signal, cnoFreeAttrbufrec = RnoFree - 1; regAttrPtr.p->attrbuf[ZBUF_NEXT] = RNIL; } else { - ljam(); + jam(); storedSeizeAttrinbufrecErrorLab(signal, regOperPtr); return false; }//if if (regOperPtr->firstAttrinbufrec == RNIL) { - ljam(); + jam(); regOperPtr->firstAttrinbufrec = regAttrPtr.i; }//if regAttrPtr.p->attrbuf[ZBUF_NEXT] = RNIL; if (regOperPtr->lastAttrinbufrec != RNIL) { AttrbufrecPtr tempAttrinbufptr; - ljam(); + jam(); tempAttrinbufptr.i = regOperPtr->lastAttrinbufrec; ptrCheckGuard(tempAttrinbufptr, cnoOfAttrbufrec, attrbufrec); tempAttrinbufptr.p->attrbuf[ZBUF_NEXT] = regAttrPtr.i; @@ -187,7 +185,7 @@ bool Dbtup::storedProcedureAttrInfo(Signal* signal, length); if (regOperPtr->currentAttrinbufLen < regOperPtr->attrinbufLen) { - ljam(); + jam(); return true; }//if if (ERROR_INSERTED(4005) && !copyProcedure) { @@ -205,6 +203,7 @@ bool Dbtup::storedProcedureAttrInfo(Signal* signal, storedPtr.p->storedLinkLast = regOperPtr->lastAttrinbufrec; regOperPtr->firstAttrinbufrec = RNIL; regOperPtr->lastAttrinbufrec = RNIL; + regOperPtr->m_any_value = 0; set_trans_state(regOperPtr, TRANS_IDLE); signal->theData[0] = 
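[Editor's note] copyProcedure above streams one AttributeHeader per attribute into the signal payload and flushes whenever 24 words are filled, plus a final partial flush: the standard fixed-capacity batching shape. A generic sketch (the payload word is a stand-in for AttributeHeader::init):

#include <cstdint>
#include <functional>

void emitBatched(uint32_t nItems,
                 const std::function<void(const uint32_t*, uint32_t)>& flush)
{
  uint32_t buf[24];
  uint32_t len = 0;
  for (uint32_t i = 0; i < nItems; i++) {
    buf[len++] = i;                    // stand-in for AttributeHeader::init()
    if (len == 24) { flush(buf, len); len = 0; }
  }
  if (len != 0)                        // don't forget the tail
    flush(buf, len);
}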
regOperPtr->userpointer; signal->theData[1] = storedPtr.i; @@ -222,6 +221,7 @@ void Dbtup::storedSeizeAttrinbufrecErrorLab(Signal* signal, storedPtr.p->storedLinkFirst = regOperPtr->firstAttrinbufrec; regOperPtr->firstAttrinbufrec = RNIL; regOperPtr->lastAttrinbufrec = RNIL; + regOperPtr->m_any_value = 0; set_trans_state(regOperPtr, TRANS_ERROR_WAIT_STORED_PROCREQ); signal->theData[0] = regOperPtr->userpointer; signal->theData[1] = ZSTORED_SEIZE_ATTRINBUFREC_ERROR; diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupTabDesMan.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupTabDesMan.cpp index b85a2a8394d..6406bdefe1e 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupTabDesMan.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupTabDesMan.cpp @@ -15,14 +15,12 @@ #define DBTUP_C +#define DBTUP_TAB_DES_MAN_CPP #include "Dbtup.hpp" #include <RefConvert.hpp> #include <ndb_limits.h> #include <pc.hpp> -#define ljam() { jamLine(22000 + __LINE__); } -#define ljamEntry() { jamEntryLine(22000 + __LINE__); } - /* * TABLE DESCRIPTOR MEMORY MANAGER * @@ -65,30 +63,30 @@ Uint32 Dbtup::allocTabDescr(const Tablerec* regTabPtr, Uint32* offset) allocSize = (((allocSize - 1) >> 4) + 1) << 4; Uint32 list = nextHigherTwoLog(allocSize - 1); /* CALCULATE WHICH LIST IT BELONGS TO */ for (Uint32 i = list; i < 16; i++) { - ljam(); + jam(); if (cfreeTdList[i] != RNIL) { - ljam(); + jam(); reference = cfreeTdList[i]; removeTdArea(reference, i); /* REMOVE THE AREA FROM THE FREELIST */ Uint32 retNo = (1 << i) - allocSize; /* CALCULATE THE DIFFERENCE */ if (retNo >= ZTD_FREE_SIZE) { - ljam(); + jam(); // return unused words, of course without attempting left merge Uint32 retRef = reference + allocSize; freeTabDescr(retRef, retNo, false); } else { - ljam(); + jam(); allocSize = 1 << i; }//if break; }//if }//for if (reference == RNIL) { - ljam(); + jam(); terrorCode = ZMEM_NOTABDESCR_ERROR; return RNIL; } else { - ljam(); + jam(); setTabDescrWord((reference + allocSize) - ZTD_TR_TYPE, ZTD_TYPE_NORMAL); setTabDescrWord(reference + ZTD_DATASIZE, allocSize); @@ -105,7 +103,7 @@ void Dbtup::freeTabDescr(Uint32 retRef, Uint32 retNo, bool normal) { itdaMergeTabDescr(retRef, retNo, normal); /* MERGE WITH POSSIBLE NEIGHBOURS */ while (retNo >= ZTD_FREE_SIZE) { - ljam(); + jam(); Uint32 list = nextHigherTwoLog(retNo); list--; /* RETURN TO NEXT LOWER LIST */ Uint32 sizeOfChunk = 1 << list; @@ -136,7 +134,7 @@ void Dbtup::insertTdArea(Uint32 tabDesRef, Uint32 list) setTabDescrWord(tabDesRef + ZTD_FL_HEADER, ZTD_TYPE_FREE); setTabDescrWord(tabDesRef + ZTD_FL_NEXT, cfreeTdList[list]); if (cfreeTdList[list] != RNIL) { - ljam(); /* PREVIOUSLY EMPTY SLOT */ + jam(); /* PREVIOUSLY EMPTY SLOT */ setTabDescrWord(cfreeTdList[list] + ZTD_FL_PREV, tabDesRef); }//if cfreeTdList[list] = tabDesRef; /* RELINK THE LIST */ @@ -156,28 +154,28 @@ void Dbtup::itdaMergeTabDescr(Uint32& retRef, Uint32& retNo, bool normal) { // merge right while ((retRef + retNo) < cnoOfTabDescrRec) { - ljam(); + jam(); Uint32 tabDesRef = retRef + retNo; Uint32 headerWord = getTabDescrWord(tabDesRef + ZTD_FL_HEADER); if (headerWord == ZTD_TYPE_FREE) { - ljam(); + jam(); Uint32 sizeOfMergedPart = getTabDescrWord(tabDesRef + ZTD_FL_SIZE); retNo += sizeOfMergedPart; Uint32 list = nextHigherTwoLog(sizeOfMergedPart - 1); removeTdArea(tabDesRef, list); } else { - ljam(); + jam(); break; } } // merge left const bool mergeLeft = normal; while (mergeLeft && retRef > 0) { - ljam(); + jam(); Uint32 trailerWord = getTabDescrWord(retRef - ZTD_TR_TYPE); if (trailerWord == 
ZTD_TYPE_FREE) { - ljam(); + jam(); Uint32 sizeOfMergedPart = getTabDescrWord(retRef - ZTD_TR_SIZE); ndbrequire(retRef >= sizeOfMergedPart); retRef -= sizeOfMergedPart; @@ -185,7 +183,7 @@ void Dbtup::itdaMergeTabDescr(Uint32& retRef, Uint32& retNo, bool normal) Uint32 list = nextHigherTwoLog(sizeOfMergedPart - 1); removeTdArea(retRef, list); } else { - ljam(); + jam(); break; } } @@ -213,15 +211,15 @@ void Dbtup::removeTdArea(Uint32 tabDesRef, Uint32 list) setTabDescrWord((tabDesRef + (1 << list)) - ZTD_TR_TYPE, ZTD_TYPE_NORMAL); if (tabDesRef == cfreeTdList[list]) { - ljam(); + jam(); cfreeTdList[list] = tabDescrNextPtr; /* RELINK THE LIST */ }//if if (tabDescrNextPtr != RNIL) { - ljam(); + jam(); setTabDescrWord(tabDescrNextPtr + ZTD_FL_PREV, tabDescrPrevPtr); }//if if (tabDescrPrevPtr != RNIL) { - ljam(); + jam(); setTabDescrWord(tabDescrPrevPtr + ZTD_FL_NEXT, tabDescrNextPtr); }//if }//Dbtup::removeTdArea() diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp index f85f3665ab3..09d71a19add 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp @@ -15,6 +15,7 @@ #define DBTUP_C +#define DBTUP_TRIGGER_CPP #include "Dbtup.hpp" #include <RefConvert.hpp> #include <ndb_limits.h> @@ -26,9 +27,6 @@ #include <signaldata/CreateTrig.hpp> #include <signaldata/TuxMaint.hpp> -#define ljam() { jamLine(7000 + __LINE__); } -#define ljamEntry() { jamEntryLine(7000 + __LINE__); } - /* **************************************************************** */ /* ---------------------------------------------------------------- */ /* ----------------------- TRIGGER HANDLING ----------------------- */ @@ -47,17 +45,17 @@ Dbtup::findTriggerList(Tablerec* table, case TriggerType::SUBSCRIPTION_BEFORE: switch (tevent) { case TriggerEvent::TE_INSERT: - ljam(); + jam(); if (ttime == TriggerActionTime::TA_DETACHED) tlist = &table->subscriptionInsertTriggers; break; case TriggerEvent::TE_UPDATE: - ljam(); + jam(); if (ttime == TriggerActionTime::TA_DETACHED) tlist = &table->subscriptionUpdateTriggers; break; case TriggerEvent::TE_DELETE: - ljam(); + jam(); if (ttime == TriggerActionTime::TA_DETACHED) tlist = &table->subscriptionDeleteTriggers; break; @@ -68,17 +66,17 @@ Dbtup::findTriggerList(Tablerec* table, case TriggerType::SECONDARY_INDEX: switch (tevent) { case TriggerEvent::TE_INSERT: - ljam(); + jam(); if (ttime == TriggerActionTime::TA_AFTER) tlist = &table->afterInsertTriggers; break; case TriggerEvent::TE_UPDATE: - ljam(); + jam(); if (ttime == TriggerActionTime::TA_AFTER) tlist = &table->afterUpdateTriggers; break; case TriggerEvent::TE_DELETE: - ljam(); + jam(); if (ttime == TriggerActionTime::TA_AFTER) tlist = &table->afterDeleteTriggers; break; @@ -89,7 +87,7 @@ Dbtup::findTriggerList(Tablerec* table, case TriggerType::ORDERED_INDEX: switch (tevent) { case TriggerEvent::TE_CUSTOM: - ljam(); + jam(); if (ttime == TriggerActionTime::TA_CUSTOM) tlist = &table->tuxCustomTriggers; break; @@ -100,7 +98,7 @@ Dbtup::findTriggerList(Tablerec* table, case TriggerType::READ_ONLY_CONSTRAINT: switch (tevent) { case TriggerEvent::TE_UPDATE: - ljam(); + jam(); if (ttime == TriggerActionTime::TA_AFTER) tlist = &table->constraintUpdateTriggers; break; @@ -118,7 +116,7 @@ Dbtup::findTriggerList(Tablerec* table, void Dbtup::execCREATE_TRIG_REQ(Signal* signal) { - ljamEntry(); + jamEntry(); BlockReference senderRef = signal->getSendersBlockRef(); const CreateTrigReq reqCopy = *(const 
CreateTrigReq*)signal->getDataPtr(); const CreateTrigReq* const req = &reqCopy; @@ -131,13 +129,13 @@ Dbtup::execCREATE_TRIG_REQ(Signal* signal) if (tabPtr.p->tableStatus != DEFINED ) { - ljam(); + jam(); error= CreateTrigRef::InvalidTable; } // Create trigger and associate it with the table else if (createTrigger(tabPtr.p, req)) { - ljam(); + jam(); // Send conf CreateTrigConf* const conf = (CreateTrigConf*)signal->getDataPtrSend(); conf->setUserRef(reference()); @@ -153,7 +151,7 @@ Dbtup::execCREATE_TRIG_REQ(Signal* signal) } else { - ljam(); + jam(); error= CreateTrigRef::TooManyTriggers; } ndbassert(error != CreateTrigRef::NoError); @@ -174,7 +172,7 @@ Dbtup::execCREATE_TRIG_REQ(Signal* signal) void Dbtup::execDROP_TRIG_REQ(Signal* signal) { - ljamEntry(); + jamEntry(); BlockReference senderRef = signal->getSendersBlockRef(); const DropTrigReq reqCopy = *(const DropTrigReq*)signal->getDataPtr(); const DropTrigReq* const req = &reqCopy; @@ -262,7 +260,7 @@ Dbtup::createTrigger(Tablerec* table, const CreateTrigReq* req) if ((tptr.p->triggerType == TriggerType::SUBSCRIPTION) && ((tptr.p->triggerEvent == TriggerEvent::TE_UPDATE) || (tptr.p->triggerEvent == TriggerEvent::TE_DELETE))) { - ljam(); + jam(); tptr.p->sendBeforeValues = false; } /* @@ -270,7 +268,7 @@ Dbtup::createTrigger(Tablerec* table, const CreateTrigReq* req) if (((tptr.p->triggerType == TriggerType::SUBSCRIPTION) || (tptr.p->triggerType == TriggerType::SUBSCRIPTION_BEFORE)) && (tptr.p->triggerEvent == TriggerEvent::TE_UPDATE)) { - ljam(); + jam(); tptr.p->sendOnlyChangedAttributes = true; } */ @@ -282,16 +280,16 @@ Dbtup::createTrigger(Tablerec* table, const CreateTrigReq* req) tptr.p->attributeMask.clear(); if (tptr.p->monitorAllAttributes) { - ljam(); + jam(); for(Uint32 i = 0; i < table->m_no_of_attributes; i++) { if (!primaryKey(table, i)) { - ljam(); + jam(); tptr.p->attributeMask.set(i); } } } else { // Set attribute mask - ljam(); + jam(); tptr.p->attributeMask = req->getAttributeMask(); } return true; @@ -336,7 +334,7 @@ Dbtup::dropTrigger(Tablerec* table, const DropTrigReq* req, BlockNumber sender) Ptr<TupTriggerData> ptr; for (tlist->first(ptr); !ptr.isNull(); tlist->next(ptr)) { - ljam(); + jam(); if (ptr.p->triggerId == triggerId) { if(ttype==TriggerType::SUBSCRIPTION && sender != ptr.p->m_receiverBlock) { @@ -348,10 +346,10 @@ Dbtup::dropTrigger(Tablerec* table, const DropTrigReq* req, BlockNumber sender) * * Backup doesn't really care about the Ids though. 
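The Dbtup::dropTrigger() hunk above preserves a subtle ownership rule: because Backup and Suma can both register subscription triggers with overlapping ids on the same table, a drop request may only release the list entry whose receiver block matches the sender. A minimal sketch of that scan-and-match idiom follows; TriggerEnt, dropOneTrigger, and std::list are illustrative stand-ins, not the kernel's intrusive DLList types.

    #include <cstdint>
    #include <list>

    // Simplified stand-in for TupTriggerData.
    struct TriggerEnt {
      uint32_t triggerId;
      uint32_t receiverBlock;
      bool isSubscription;
    };

    // Returns true if an entry was released; mirrors the "same id but
    // wrong owner: keep scanning" logic from Dbtup::dropTrigger().
    bool dropOneTrigger(std::list<TriggerEnt>& tlist, uint32_t id,
                        uint32_t sender)
    {
      for (auto it = tlist.begin(); it != tlist.end(); ++it) {
        if (it->triggerId != id)
          continue;
        // A subscription trigger owned by another block (Backup vs Suma)
        // must survive this request, so keep scanning instead.
        if (it->isSubscription && sender != it->receiverBlock)
          continue;
        tlist.erase(it);
        return true;
      }
      return false;  // no trigger with this id belongs to the sender
    }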
*/ - ljam(); + jam(); continue; } - ljam(); + jam(); tlist->release(ptr.i); return 0; } @@ -379,7 +377,7 @@ Dbtup::checkImmediateTriggersAfterInsert(KeyReqStruct *req_struct, if ((regOperPtr->op_struct.primary_replica) && (!(regTablePtr->afterInsertTriggers.isEmpty()))) { - ljam(); + jam(); fireImmediateTriggers(req_struct, regTablePtr->afterInsertTriggers, regOperPtr); @@ -397,14 +395,14 @@ Dbtup::checkImmediateTriggersAfterUpdate(KeyReqStruct *req_struct, if ((regOperPtr->op_struct.primary_replica) && (!(regTablePtr->afterUpdateTriggers.isEmpty()))) { - ljam(); + jam(); fireImmediateTriggers(req_struct, regTablePtr->afterUpdateTriggers, regOperPtr); } if ((regOperPtr->op_struct.primary_replica) && (!(regTablePtr->constraintUpdateTriggers.isEmpty()))) { - ljam(); + jam(); fireImmediateTriggers(req_struct, regTablePtr->constraintUpdateTriggers, regOperPtr); @@ -422,7 +420,7 @@ Dbtup::checkImmediateTriggersAfterDelete(KeyReqStruct *req_struct, if ((regOperPtr->op_struct.primary_replica) && (!(regTablePtr->afterDeleteTriggers.isEmpty()))) { - ljam(); + jam(); executeTriggers(req_struct, regTablePtr->afterDeleteTriggers, regOperPtr); @@ -443,7 +441,7 @@ void Dbtup::checkDeferredTriggers(Signal* signal, Operationrec* const regOperPtr, Tablerec* const regTablePtr) { - ljam(); + jam(); // NYI }//Dbtup::checkDeferredTriggers() #endif @@ -480,7 +478,7 @@ void Dbtup::checkDetachedTriggers(KeyReqStruct *req_struct, if (save_ptr->m_header_bits & Tuple_header::ALLOC) { if (save_type == ZDELETE) { // insert + delete = nothing - ljam(); + jam(); return; goto end; } @@ -496,10 +494,10 @@ void Dbtup::checkDetachedTriggers(KeyReqStruct *req_struct, switch(regOperPtr->op_struct.op_type) { case(ZINSERT): - ljam(); + jam(); if (regTablePtr->subscriptionInsertTriggers.isEmpty()) { // Table has no active triggers monitoring inserts at commit - ljam(); + jam(); goto end; } @@ -509,10 +507,10 @@ void Dbtup::checkDetachedTriggers(KeyReqStruct *req_struct, regOperPtr, disk); break; case(ZDELETE): - ljam(); + jam(); if (regTablePtr->subscriptionDeleteTriggers.isEmpty()) { // Table has no active triggers monitoring deletes at commit - ljam(); + jam(); goto end; } @@ -523,10 +521,10 @@ void Dbtup::checkDetachedTriggers(KeyReqStruct *req_struct, regOperPtr, disk); break; case(ZUPDATE): - ljam(); + jam(); if (regTablePtr->subscriptionUpdateTriggers.isEmpty()) { // Table has no active triggers monitoring updates at commit - ljam(); + jam(); goto end; } @@ -554,10 +552,10 @@ Dbtup::fireImmediateTriggers(KeyReqStruct *req_struct, TriggerPtr trigPtr; triggerList.first(trigPtr); while (trigPtr.i != RNIL) { - ljam(); + jam(); if (trigPtr.p->monitorAllAttributes || trigPtr.p->attributeMask.overlaps(req_struct->changeMask)) { - ljam(); + jam(); executeTrigger(req_struct, trigPtr.p, regOperPtr); @@ -576,10 +574,10 @@ Dbtup::fireDeferredTriggers(Signal* signal, TriggerPtr trigPtr; triggerList.first(trigPtr); while (trigPtr.i != RNIL) { - ljam(); + jam(); if (trigPtr.p->monitorAllAttributes || trigPtr.p->attributeMask.overlaps(req_struct->changeMask)) { - ljam(); + jam(); executeTrigger(req_struct, trigPtr, regOperPtr); @@ -606,12 +604,12 @@ Dbtup::fireDetachedTriggers(KeyReqStruct *req_struct, ndbrequire(regOperPtr->is_first_operation()); triggerList.first(trigPtr); while (trigPtr.i != RNIL) { - ljam(); + jam(); if ((trigPtr.p->monitorReplicas || regOperPtr->op_struct.primary_replica) && (trigPtr.p->monitorAllAttributes || trigPtr.p->attributeMask.overlaps(req_struct->changeMask))) { - ljam(); + jam(); executeTrigger(req_struct, 
trigPtr.p, regOperPtr, @@ -628,7 +626,7 @@ void Dbtup::executeTriggers(KeyReqStruct *req_struct, TriggerPtr trigPtr; triggerList.first(trigPtr); while (trigPtr.i != RNIL) { - ljam(); + jam(); executeTrigger(req_struct, trigPtr.p, regOperPtr); @@ -679,7 +677,7 @@ void Dbtup::executeTrigger(KeyReqStruct *req_struct, ptrCheckGuard(regFragPtr, cnoOfFragrec, fragrecord); if (ref == BACKUP) { - ljam(); + jam(); /* In order for the implementation of BACKUP to work even when changing primaries in the middle of the backup we need to set the trigger on @@ -692,9 +690,9 @@ void Dbtup::executeTrigger(KeyReqStruct *req_struct, signal->theData[0] = trigPtr->triggerId; signal->theData[1] = regFragPtr.p->fragmentId; EXECUTE_DIRECT(BACKUP, GSN_BACKUP_TRIG_REQ, signal, 2); - ljamEntry(); + jamEntry(); if (signal->theData[0] == 0) { - ljam(); + jam(); return; } } @@ -709,7 +707,7 @@ void Dbtup::executeTrigger(KeyReqStruct *req_struct, beforeBuffer, noBeforeWords, disk)) { - ljam(); + jam(); return; } //-------------------------------------------------------------------- @@ -725,13 +723,13 @@ void Dbtup::executeTrigger(KeyReqStruct *req_struct, switch(trigPtr->triggerType) { case (TriggerType::SECONDARY_INDEX): - ljam(); + jam(); ref = req_struct->TC_ref; executeDirect = false; break; case (TriggerType::SUBSCRIPTION): case (TriggerType::SUBSCRIPTION_BEFORE): - ljam(); + jam(); // Since only backup uses subscription triggers we send to backup directly for now ref = trigPtr->m_receiverBlock; executeDirect = true; @@ -752,22 +750,22 @@ void Dbtup::executeTrigger(KeyReqStruct *req_struct, switch(regOperPtr->op_struct.op_type) { case(ZINSERT): - ljam(); + jam(); // Send AttrInfo signals with new attribute values trigAttrInfo->setAttrInfoType(TrigAttrInfo::AFTER_VALUES); sendTrigAttrInfo(signal, afterBuffer, noAfterWords, executeDirect, ref); break; case(ZDELETE): if (trigPtr->sendBeforeValues) { - ljam(); + jam(); trigAttrInfo->setAttrInfoType(TrigAttrInfo::BEFORE_VALUES); sendTrigAttrInfo(signal, beforeBuffer, noBeforeWords, executeDirect,ref); } break; case(ZUPDATE): - ljam(); + jam(); if (trigPtr->sendBeforeValues) { - ljam(); + jam(); trigAttrInfo->setAttrInfoType(TrigAttrInfo::BEFORE_VALUES); sendTrigAttrInfo(signal, beforeBuffer, noBeforeWords, executeDirect,ref); } @@ -793,9 +791,9 @@ Uint32 Dbtup::setAttrIds(Bitmask<MAXNROFATTRIBUTESINWORDS>& attributeMask, { Uint32 bufIndx = 0; for (Uint32 i = 0; i < m_no_of_attributesibutes; i++) { - ljam(); + jam(); if (attributeMask.get(i)) { - ljam(); + jam(); AttributeHeader::init(&inBuffer[bufIndx++], i, 0); } } @@ -863,7 +861,7 @@ bool Dbtup::readTriggerInfo(TupTriggerData* const trigPtr, Uint32 numAttrsToRead; if ((regOperPtr->op_struct.op_type == ZUPDATE) && (trigPtr->sendOnlyChangedAttributes)) { - ljam(); + jam(); //-------------------------------------------------------------------- // Update that sends only changed information //-------------------------------------------------------------------- @@ -875,13 +873,13 @@ bool Dbtup::readTriggerInfo(TupTriggerData* const trigPtr, } else if ((regOperPtr->op_struct.op_type == ZDELETE) && (!trigPtr->sendBeforeValues)) { - ljam(); + jam(); //-------------------------------------------------------------------- // Delete without sending before values only read Primary Key //-------------------------------------------------------------------- return true; } else { - ljam(); + jam(); //-------------------------------------------------------------------- // All others send all attributes that are monitored, except: // 
Omit unchanged blob inlines on update i.e. @@ -903,7 +901,7 @@ bool Dbtup::readTriggerInfo(TupTriggerData* const trigPtr, //-------------------------------------------------------------------- if (regOperPtr->op_struct.op_type != ZDELETE) { - ljam(); + jam(); int ret = readAttributes(req_struct, &readBuffer[0], numAttrsToRead, @@ -913,7 +911,7 @@ bool Dbtup::readTriggerInfo(TupTriggerData* const trigPtr, ndbrequire(ret != -1); noAfterWords= ret; } else { - ljam(); + jam(); noAfterWords = 0; } @@ -925,7 +923,7 @@ bool Dbtup::readTriggerInfo(TupTriggerData* const trigPtr, if ((regOperPtr->op_struct.op_type == ZUPDATE || regOperPtr->op_struct.op_type == ZDELETE) && (trigPtr->sendBeforeValues)) { - ljam(); + jam(); Tuple_header *save= req_struct->m_tuple_ptr; PagePtr tmp; @@ -961,7 +959,7 @@ bool Dbtup::readTriggerInfo(TupTriggerData* const trigPtr, // Although a trigger was fired it was not necessary since the old // value and the new value was exactly the same //-------------------------------------------------------------------- - ljam(); + jam(); //XXX does this work with collations? return false; } @@ -981,21 +979,21 @@ void Dbtup::sendTrigAttrInfo(Signal* signal, do { sigLen = dataLen - dataIndex; if (sigLen > TrigAttrInfo::DataLength) { - ljam(); + jam(); sigLen = TrigAttrInfo::DataLength; } MEMCOPY_NO_WORDS(trigAttrInfo->getData(), data + dataIndex, sigLen); if (executeDirect) { - ljam(); + jam(); EXECUTE_DIRECT(receiverReference, GSN_TRIG_ATTRINFO, signal, TrigAttrInfo::StaticLength + sigLen); - ljamEntry(); + jamEntry(); } else { - ljam(); + jam(); sendSignal(receiverReference, GSN_TRIG_ATTRINFO, signal, @@ -1023,15 +1021,15 @@ void Dbtup::sendFireTrigOrd(Signal* signal, switch(regOperPtr->op_struct.op_type) { case(ZINSERT): - ljam(); + jam(); fireTrigOrd->setTriggerEvent(TriggerEvent::TE_INSERT); break; case(ZDELETE): - ljam(); + jam(); fireTrigOrd->setTriggerEvent(TriggerEvent::TE_DELETE); break; case(ZUPDATE): - ljam(); + jam(); fireTrigOrd->setTriggerEvent(TriggerEvent::TE_UPDATE); break; default: @@ -1045,23 +1043,24 @@ void Dbtup::sendFireTrigOrd(Signal* signal, switch(trigPtr->triggerType) { case (TriggerType::SECONDARY_INDEX): - ljam(); + jam(); sendSignal(req_struct->TC_ref, GSN_FIRE_TRIG_ORD, signal, FireTrigOrd::SignalLength, JBB); break; case (TriggerType::SUBSCRIPTION_BEFORE): // Only Suma - ljam(); + jam(); // Since only backup uses subscription triggers we // send to backup directly for now fireTrigOrd->setGCI(req_struct->gci); fireTrigOrd->setHashValue(req_struct->hash_value); + fireTrigOrd->m_any_value = regOperPtr->m_any_value; EXECUTE_DIRECT(trigPtr->m_receiverBlock, GSN_FIRE_TRIG_ORD, signal, - FireTrigOrd::SignalWithHashValueLength); + FireTrigOrd::SignalLengthSuma); break; case (TriggerType::SUBSCRIPTION): - ljam(); + jam(); // Since only backup uses subscription triggers we // send to backup directly for now fireTrigOrd->setGCI(req_struct->gci); @@ -1128,7 +1127,7 @@ Dbtup::addTuxEntries(Signal* signal, Tablerec* regTabPtr) { if (ERROR_INSERTED(4022)) { - ljam(); + jam(); CLEAR_ERROR_INSERT_VALUE; terrorCode = 9999; return -1; @@ -1139,12 +1138,12 @@ Dbtup::addTuxEntries(Signal* signal, Uint32 failPtrI; triggerList.first(triggerPtr); while (triggerPtr.i != RNIL) { - ljam(); + jam(); req->indexId = triggerPtr.p->indexId; req->errorCode = RNIL; if (ERROR_INSERTED(4023) && ! 
triggerList.hasNext(triggerPtr)) { - ljam(); + jam(); CLEAR_ERROR_INSERT_VALUE; terrorCode = 9999; failPtrI = triggerPtr.i; @@ -1152,9 +1151,9 @@ Dbtup::addTuxEntries(Signal* signal, } EXECUTE_DIRECT(DBTUX, GSN_TUX_MAINT_REQ, signal, TuxMaintReq::SignalLength); - ljamEntry(); + jamEntry(); if (req->errorCode != 0) { - ljam(); + jam(); terrorCode = req->errorCode; failPtrI = triggerPtr.i; goto fail; @@ -1166,12 +1165,12 @@ fail: req->opInfo = TuxMaintReq::OpRemove; triggerList.first(triggerPtr); while (triggerPtr.i != failPtrI) { - ljam(); + jam(); req->indexId = triggerPtr.p->indexId; req->errorCode = RNIL; EXECUTE_DIRECT(DBTUX, GSN_TUX_MAINT_REQ, signal, TuxMaintReq::SignalLength); - ljamEntry(); + jamEntry(); ndbrequire(req->errorCode == 0); triggerList.next(triggerPtr); } @@ -1202,15 +1201,15 @@ Dbtup::executeTuxCommitTriggers(Signal* signal, if (regOperPtr->op_struct.op_type == ZINSERT) { if (! regOperPtr->op_struct.delete_insert_flag) return; - ljam(); + jam(); tupVersion= decr_tup_version(regOperPtr->tupVersion); } else if (regOperPtr->op_struct.op_type == ZUPDATE) { - ljam(); + jam(); tupVersion= decr_tup_version(regOperPtr->tupVersion); } else if (regOperPtr->op_struct.op_type == ZDELETE) { if (regOperPtr->op_struct.delete_insert_flag) return; - ljam(); + jam(); tupVersion= regOperPtr->tupVersion; } else { ndbrequire(false); @@ -1236,13 +1235,13 @@ Dbtup::executeTuxAbortTriggers(Signal* signal, // get version Uint32 tupVersion; if (regOperPtr->op_struct.op_type == ZINSERT) { - ljam(); + jam(); tupVersion = regOperPtr->tupVersion; } else if (regOperPtr->op_struct.op_type == ZUPDATE) { - ljam(); + jam(); tupVersion = regOperPtr->tupVersion; } else if (regOperPtr->op_struct.op_type == ZDELETE) { - ljam(); + jam(); return; } else { ndbrequire(false); @@ -1267,12 +1266,12 @@ Dbtup::removeTuxEntries(Signal* signal, TriggerPtr triggerPtr; triggerList.first(triggerPtr); while (triggerPtr.i != RNIL) { - ljam(); + jam(); req->indexId = triggerPtr.p->indexId; req->errorCode = RNIL, EXECUTE_DIRECT(DBTUX, GSN_TUX_MAINT_REQ, signal, TuxMaintReq::SignalLength); - ljamEntry(); + jamEntry(); // must succeed ndbrequire(req->errorCode == 0); triggerList.next(triggerPtr); diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupVarAlloc.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupVarAlloc.cpp index 178744d7603..1929901f86e 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupVarAlloc.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupVarAlloc.cpp @@ -14,12 +14,9 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #define DBTUP_C +#define DBTUP_VAR_ALLOC_CPP #include "Dbtup.hpp" -#define ljam() { jamLine(32000 + __LINE__); } -#define ljamEntry() { jamEntryLine(32000 + __LINE__); } - - void Dbtup::init_list_sizes(void) { c_min_list_size[0]= 200; @@ -106,9 +103,9 @@ Dbtup::alloc_var_part(Fragrecord* fragPtr, PagePtr pagePtr; pagePtr.i= get_alloc_page(fragPtr, (alloc_size + 1)); if (pagePtr.i == RNIL) { - ljam(); + jam(); if ((pagePtr.i= get_empty_var_page(fragPtr)) == RNIL) { - ljam(); + jam(); return 0; } c_page_pool.getPtr(pagePtr); @@ -124,7 +121,7 @@ Dbtup::alloc_var_part(Fragrecord* fragPtr, pagePtr.p->page_state = ZTH_MM_FREE; } else { c_page_pool.getPtr(pagePtr); - ljam(); + jam(); } Uint32 idx= ((Var_page*)pagePtr.p) ->alloc_record(alloc_size, (Var_page*)ctemp_page, Var_page::CHAIN); @@ -160,7 +157,6 @@ void Dbtup::free_var_rec(Fragrecord* fragPtr, /** * TODO free fix + var part */ - Uint32 page_idx= key->m_page_idx; Uint32 *ptr = 
((Fix_page*)pagePtr.p)->get_ptr(key->m_page_idx, 0); Tuple_header* tuple = (Tuple_header*)ptr; @@ -176,7 +172,7 @@ void Dbtup::free_var_rec(Fragrecord* fragPtr, ndbassert(pagePtr.p->free_space <= Var_page::DATA_WORDS); if (pagePtr.p->free_space == Var_page::DATA_WORDS - 1) { - ljam(); + jam(); /* This code could be used when we release pages. remove_free_page(signal,fragPtr,page_header,page_header->list_index); @@ -184,7 +180,7 @@ void Dbtup::free_var_rec(Fragrecord* fragPtr, */ update_free_page_list(fragPtr, pagePtr); } else { - ljam(); + jam(); update_free_page_list(fragPtr, pagePtr); } return; @@ -258,16 +254,16 @@ Dbtup::get_alloc_page(Fragrecord* fragPtr, Uint32 alloc_size) start_index= calculate_free_list_impl(alloc_size); if (start_index == (MAX_FREE_LIST - 1)) { - ljam(); + jam(); } else { - ljam(); + jam(); ndbrequire(start_index < (MAX_FREE_LIST - 1)); start_index++; } for (i= start_index; i < MAX_FREE_LIST; i++) { - ljam(); + jam(); if (!fragPtr->free_var_page_array[i].isEmpty()) { - ljam(); + jam(); return fragPtr->free_var_page_array[i].firstItem; } } @@ -276,9 +272,9 @@ Dbtup::get_alloc_page(Fragrecord* fragPtr, Uint32 alloc_size) LocalDLList<Page> list(c_page_pool, fragPtr->free_var_page_array[i]); for(list.first(pagePtr); !pagePtr.isNull() && loop < 16; ) { - ljam(); + jam(); if (pagePtr.p->free_space >= alloc_size) { - ljam(); + jam(); return pagePtr.i; } loop++; @@ -345,7 +341,7 @@ void Dbtup::update_free_page_list(Fragrecord* fragPtr, (free_space > c_max_list_size[list_index])) { Uint32 new_list_index= calculate_free_list_impl(free_space); if (list_index != MAX_FREE_LIST) { - ljam(); + jam(); /* * Only remove it from its list if it is in a list */ @@ -360,11 +356,11 @@ void Dbtup::update_free_page_list(Fragrecord* fragPtr, This can only happen for the free list with least guaranteed free space. 
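Dbtup's variable-size page allocator keeps pages in MAX_FREE_LIST free lists bucketed by guaranteed free space, and calculate_free_list_impl(), shown just after this comment, picks the first list whose upper bound covers a page's current free space. A rough free-standing sketch of that selection, with made-up thresholds standing in for the real c_max_list_size[] values computed in init_list_sizes():

    #include <cstdint>
    #include <cassert>

    static const uint32_t MAX_FREE_LIST = 4;
    // Assumed bounds for illustration only; the kernel derives the real
    // table from the var-page layout.
    static const uint32_t max_list_size[MAX_FREE_LIST] = {199, 499, 999, 4095};

    // First list whose upper bound covers the page's free space.
    uint32_t calculate_free_list(uint32_t free_space)
    {
      for (uint32_t i = 0; i < MAX_FREE_LIST; i++)
        if (free_space <= max_list_size[i])
          return i;
      assert(!"free_space exceeds the largest list bound");
      return MAX_FREE_LIST - 1;
    }

As the get_alloc_page() hunk above suggests, allocation then probes from one list above the computed index upward (except for the last list), since only a higher bucket guarantees enough contiguous free words.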
*/ - ljam(); + jam(); ndbrequire(new_list_index == 0); pagePtr.p->list_index= MAX_FREE_LIST; } else { - ljam(); + jam(); LocalDLList<Page> list(c_page_pool, fragPtr->free_var_page_array[new_list_index]); list.add(pagePtr); @@ -380,9 +376,9 @@ Uint32 Dbtup::calculate_free_list_impl(Uint32 free_space_size) const { Uint32 i; for (i = 0; i < MAX_FREE_LIST; i++) { - ljam(); + jam(); if (free_space_size <= c_max_list_size[i]) { - ljam(); + jam(); return i; } } diff --git a/storage/ndb/src/kernel/blocks/dbtup/tuppage.cpp b/storage/ndb/src/kernel/blocks/dbtup/tuppage.cpp index 8955faff99e..7ebbde93ac7 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/tuppage.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/tuppage.cpp @@ -461,9 +461,8 @@ operator<< (NdbOut& out, const Tup_fixsize_page& page) << " free: " << page.free_space; out << " free list: " << hex << page.next_free_index << " " << flush; - Uint32 startTuple = page.next_free_index >> 16; - #if 0 + Uint32 startTuple = page.next_free_index >> 16; Uint32 cnt = 0; Uint32 next= startTuple; while((next & 0xFFFF) != 0xFFFF) diff --git a/storage/ndb/src/kernel/blocks/dbtup/tuppage.hpp b/storage/ndb/src/kernel/blocks/dbtup/tuppage.hpp index 407dfae5865..44aa6182b54 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/tuppage.hpp +++ b/storage/ndb/src/kernel/blocks/dbtup/tuppage.hpp @@ -94,6 +94,7 @@ struct Tup_fixsize_page * Alloc record from page * return page_idx **/ + Tup_fixsize_page() {} Uint32 alloc_record(); Uint32 alloc_record(Uint32 page_idx); Uint32 free_record(Uint32 page_idx); @@ -148,6 +149,7 @@ struct Tup_varsize_page Uint32 m_data[DATA_WORDS]; + Tup_varsize_page() {} void init(); Uint32* get_free_space_ptr() { diff --git a/storage/ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp b/storage/ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp index 32cd7ab0460..13485a31414 100644 --- a/storage/ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp +++ b/storage/ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp @@ -401,8 +401,6 @@ Dbtux::nodePopUpScans(NodeHandle& node, unsigned pos) void Dbtux::nodeSlide(NodeHandle& dstNode, NodeHandle& srcNode, unsigned cnt, unsigned i) { - Frag& frag = dstNode.m_frag; - TreeHead& tree = frag.m_tree; ndbrequire(i <= 1); while (cnt != 0) { TreeEnt ent; diff --git a/storage/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp b/storage/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp index ef34fcefe19..a0643848530 100644 --- a/storage/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp +++ b/storage/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp @@ -707,7 +707,6 @@ Dbtux::scanFirst(ScanOpPtr scanPtr) debugOut << "Enter first scan " << scanPtr.i << " " << scan << endl; } #endif - TreeHead& tree = frag.m_tree; // set up index keys for this operation setKeyAttrs(frag); // scan direction 0, 1 @@ -987,7 +986,6 @@ Dbtux::scanVisible(ScanOpPtr scanPtr, TreeEnt ent) const ScanOp& scan = *scanPtr.p; const Frag& frag = *c_fragPool.getPtr(scan.m_fragPtrI); Uint32 tableFragPtrI = frag.m_tupTableFragPtrI; - Uint32 fragId = frag.m_fragId; Uint32 tupAddr = getTupAddr(frag, ent); Uint32 tupVersion = ent.m_tupVersion; // check for same tuple twice in row diff --git a/storage/ndb/src/kernel/blocks/dbtux/DbtuxStat.cpp b/storage/ndb/src/kernel/blocks/dbtux/DbtuxStat.cpp index a0e1cbef61c..fe59b8bba2c 100644 --- a/storage/ndb/src/kernel/blocks/dbtux/DbtuxStat.cpp +++ b/storage/ndb/src/kernel/blocks/dbtux/DbtuxStat.cpp @@ -51,7 +51,6 @@ Dbtux::statRecordsInRange(ScanOpPtr scanPtr, Uint32* out) TreePos pos1 = scan.m_scanPos; TreePos pos2; { // as in scanFirst() - TreeHead& tree = frag.m_tree; 
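Several hunks in this changeset add empty default constructors such as Tup_fixsize_page() {} and Tup_varsize_page() {} to structs that overlay raw pages. One plausible motivation, an assumption rather than anything stated in the patch, is that a user-provided no-op constructor keeps placement-new from value-initializing, that is zero-filling, page memory that already holds live data. A hedged sketch with a hypothetical FixPage type:

    #include <cstdint>
    #include <new>

    struct FixPage {
      FixPage() {}                 // user-provided: default-init is a no-op
      uint32_t page_state;
      uint32_t free_space;
      uint32_t m_data[8192 - 2];   // fills out a 32KB page of 32-bit words
    };

    int main()
    {
      alignas(FixPage) static uint32_t raw[8192];  // raw page from a pool
      // With the empty constructor, this placement-new leaves the words
      // untouched; an aggregate would be zero-initialized here instead.
      FixPage* page = new (raw) FixPage();
      page->free_space = 42;       // normal member access afterwards
      return 0;
    }

Whether this or a container/pool requirement motivated the authors is not visible in the diff, so treat the rationale as a guess.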
setKeyAttrs(frag); const unsigned idir = 1; const ScanBound& bound = *scan.m_bound[idir]; diff --git a/storage/ndb/src/kernel/blocks/dbtux/DbtuxTree.cpp b/storage/ndb/src/kernel/blocks/dbtux/DbtuxTree.cpp index 0d0c0ed9592..970ee794281 100644 --- a/storage/ndb/src/kernel/blocks/dbtux/DbtuxTree.cpp +++ b/storage/ndb/src/kernel/blocks/dbtux/DbtuxTree.cpp @@ -221,7 +221,6 @@ Dbtux::treeRemove(Frag& frag, TreePos treePos) void Dbtux::treeRemoveInner(Frag& frag, NodeHandle lubNode, unsigned pos) { - TreeHead& tree = frag.m_tree; TreeEnt ent; // find g.l.b node NodeHandle glbNode(frag); diff --git a/storage/ndb/src/kernel/blocks/diskpage.cpp b/storage/ndb/src/kernel/blocks/diskpage.cpp index 3f98e078746..50e9b6e53cb 100644 --- a/storage/ndb/src/kernel/blocks/diskpage.cpp +++ b/storage/ndb/src/kernel/blocks/diskpage.cpp @@ -49,7 +49,7 @@ operator<<(NdbOut& out, const File_formats::Zero_page_header& obj) char buf[256]; out << "page size: " << obj.m_page_size << endl; out << "ndb version: " << obj.m_ndb_version << ", " << - getVersionString(obj.m_ndb_version, 0, buf, sizeof(buf)) << endl; + ndbGetVersionString(obj.m_ndb_version, 0, buf, sizeof(buf)) << endl; out << "ndb node id: " << obj.m_node_id << endl; out << "file type: " << obj.m_file_type << endl; out << "time: " << obj.m_time << ", " diff --git a/storage/ndb/src/kernel/blocks/diskpage.hpp b/storage/ndb/src/kernel/blocks/diskpage.hpp index 579b538c910..4119c328e35 100644 --- a/storage/ndb/src/kernel/blocks/diskpage.hpp +++ b/storage/ndb/src/kernel/blocks/diskpage.hpp @@ -54,6 +54,7 @@ struct File_formats Uint32 m_node_id; Uint32 m_file_type; Uint32 m_time; // time(0) + Zero_page_header() {} void init(File_type ft, Uint32 node_id, Uint32 version, Uint32 now); int validate(File_type ft, Uint32 node_id, Uint32 version, Uint32 now); }; @@ -86,6 +87,7 @@ struct File_formats Uint32 m_fragment_id; Uint32 m_next_free_extent; }; + Extent_header() {} Uint32 m_page_bitmask[1]; // (BitsPerPage*ExtentSize)/(32*PageSize) Uint32 get_free_bits(Uint32 page) const; Uint32 get_free_word_offset(Uint32 page) const; @@ -102,6 +104,7 @@ struct File_formats struct Page_header m_page_header; Extent_header m_extents[1]; + Extent_page() {} Extent_header* get_header(Uint32 extent_no, Uint32 extent_size); }; diff --git a/storage/ndb/src/kernel/blocks/lgman.cpp b/storage/ndb/src/kernel/blocks/lgman.cpp index ce01b31927c..0481f7b399b 100644 --- a/storage/ndb/src/kernel/blocks/lgman.cpp +++ b/storage/ndb/src/kernel/blocks/lgman.cpp @@ -133,10 +133,6 @@ void Lgman::execSTTOR(Signal* signal) { jamEntry(); - - const Uint32 startphase = signal->theData[1]; - const Uint32 typeOfStart = signal->theData[7]; - sendSTTORRY(signal); return; @@ -274,7 +270,6 @@ Lgman::execDUMP_STATE_ORD(Signal* signal){ !ptr.p->m_log_sync_waiters.isEmpty()); if (!ptr.p->m_log_buffer_waiters.isEmpty()) { - Uint32 free_buffer= ptr.p->m_free_buffer_words; Ptr<Log_waiter> waiter; Local_log_waiter_list list(m_log_waiter_pool, ptr.p->m_log_buffer_waiters); @@ -351,6 +346,12 @@ Lgman::execCREATE_FILEGROUP_REQ(Signal* signal){ m_logfile_group_hash.add(ptr); m_logfile_group_list.add(ptr); + + if (getNodeState().getNodeRestartInProgress() || + getNodeState().getSystemRestartInProgress()) + { + ptr.p->m_state = Logfile_group::LG_STARTING; + } CreateFilegroupImplConf* conf= (CreateFilegroupImplConf*)signal->getDataPtr(); @@ -375,8 +376,6 @@ Lgman::execDROP_FILEGROUP_REQ(Signal* signal) { jamEntry(); - jamEntry(); - Uint32 errorCode = 0; DropFilegroupImplReq req = *(DropFilegroupImplReq*)signal->getDataPtr(); do 
@@ -441,7 +440,6 @@ Lgman::drop_filegroup_drop_files(Signal* signal, { jam(); ndbrequire(! (ptr.p->m_state & Logfile_group::LG_THREAD_MASK)); - ndbrequire(ptr.p->m_meta_files.isEmpty()); ndbrequire(ptr.p->m_outstanding_fs == 0); Local_undofile_list list(m_file_pool, ptr.p->m_files); @@ -457,6 +455,18 @@ Lgman::drop_filegroup_drop_files(Signal* signal, return; } + Local_undofile_list metalist(m_file_pool, ptr.p->m_meta_files); + if (metalist.first(file_ptr)) + { + jam(); + metalist.remove(file_ptr); + list.add(file_ptr); + file_ptr.p->m_create.m_senderRef = ref; + file_ptr.p->m_create.m_senderData = data; + create_file_abort(signal, ptr, file_ptr); + return; + } + free_logbuffer_memory(ptr); m_logfile_group_hash.release(ptr); DropFilegroupImplConf *conf = (DropFilegroupImplConf*)signal->getDataPtr(); @@ -467,7 +477,8 @@ Lgman::drop_filegroup_drop_files(Signal* signal, } void -Lgman::execCREATE_FILE_REQ(Signal* signal){ +Lgman::execCREATE_FILE_REQ(Signal* signal) +{ jamEntry(); CreateFileImplReq* req= (CreateFileImplReq*)signal->getDataPtr(); @@ -496,6 +507,7 @@ Lgman::execCREATE_FILE_REQ(Signal* signal){ switch(requestInfo){ case CreateFileImplReq::Commit: { + jam(); ndbrequire(find_file_by_id(file_ptr, ptr.p->m_meta_files, req->file_id)); file_ptr.p->m_create.m_senderRef = req->senderRef; file_ptr.p->m_create.m_senderData = req->senderData; @@ -508,6 +520,7 @@ Lgman::execCREATE_FILE_REQ(Signal* signal){ Uint32 senderData = req->senderData; if (find_file_by_id(file_ptr, ptr.p->m_meta_files, req->file_id)) { + jam(); file_ptr.p->m_create.m_senderRef = senderRef; file_ptr.p->m_create.m_senderData = senderData; create_file_abort(signal, ptr, file_ptr); @@ -515,11 +528,11 @@ Lgman::execCREATE_FILE_REQ(Signal* signal){ else { CreateFileImplConf* conf= (CreateFileImplConf*)signal->getDataPtr(); + jam(); conf->senderData = senderData; conf->senderRef = reference(); sendSignal(senderRef, GSN_CREATE_FILE_CONF, signal, CreateFileImplConf::SignalLength, JBB); - return; } return; } @@ -708,7 +721,8 @@ Lgman::create_file_commit(Signal* signal, Uint32 senderData = ptr.p->m_create.m_senderData; bool first= false; - if(ptr.p->m_state == Undofile::FS_CREATING) + if(ptr.p->m_state == Undofile::FS_CREATING && + (lg_ptr.p->m_state & Logfile_group::LG_ONLINE)) { jam(); Local_undofile_list free(m_file_pool, lg_ptr.p->m_files); @@ -1937,8 +1951,7 @@ void Lgman::execSUB_GCP_COMPLETE_REP(Signal* signal) { jamEntry(); - Uint32 gci= ((SubGcpCompleteRep*)signal->getDataPtr())->gci; - + Ptr<Logfile_group> ptr; m_logfile_group_list.first(ptr); @@ -2074,13 +2087,17 @@ Lgman::execSTART_RECREQ(Signal* signal) void Lgman::find_log_head(Signal* signal, Ptr<Logfile_group> ptr) { + ndbrequire(ptr.p->m_state & + (Logfile_group::LG_STARTING | Logfile_group::LG_SORTING)); + if(ptr.p->m_meta_files.isEmpty() && ptr.p->m_files.isEmpty()) { jam(); /** * Logfile_group wo/ any files */ - + ptr.p->m_state &= ~(Uint32)Logfile_group::LG_STARTING; + ptr.p->m_state |= Logfile_group::LG_ONLINE; m_logfile_group_list.next(ptr); signal->theData[0] = LgmanContinueB::FIND_LOG_HEAD; signal->theData[1] = ptr.i; diff --git a/storage/ndb/src/kernel/blocks/lgman.hpp b/storage/ndb/src/kernel/blocks/lgman.hpp index b26c3219088..d2706818144 100644 --- a/storage/ndb/src/kernel/blocks/lgman.hpp +++ b/storage/ndb/src/kernel/blocks/lgman.hpp @@ -175,13 +175,14 @@ public: ,LG_SORTING = 0x002 // Sorting files ,LG_SEARCHING = 0x004 // Searching in last file ,LG_EXEC_THREAD = 0x008 // Execute thread is running - ,LG_READ_THREAD = 0x010 // Read thread is running 
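The lgman changes above introduce an LG_STARTING state bit so that a logfile group created during a node or system restart is not treated as online until find_log_head() has examined its files; a group with no files at all is flipped straight to LG_ONLINE. A small sketch of that bitmask transition, using an illustrative subset of the Logfile_group state enum:

    #include <cstdint>
    #include <cassert>

    // Illustrative subset of the state bits from lgman.hpp.
    enum LgState : uint32_t {
      LG_ONLINE   = 0x001,
      LG_SORTING  = 0x002,
      LG_STARTING = 0x800,  // added by this change for restart handling
    };

    // Mirrors the find_log_head() transition for a file-less group: it
    // must be starting or sorting, and is brought straight online.
    void bring_online(uint32_t& state)
    {
      assert(state & (LG_STARTING | LG_SORTING));
      state &= ~static_cast<uint32_t>(LG_STARTING);
      state |= LG_ONLINE;
    }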
+ ,LG_READ_THREAD = 0x010 // Read thread is running ,LG_FORCE_SYNC_THREAD = 0x020 ,LG_SYNC_WAITERS_THREAD = 0x040 ,LG_CUT_LOG_THREAD = 0x080 ,LG_WAITERS_THREAD = 0x100 ,LG_FLUSH_THREAD = 0x200 ,LG_DROPPING = 0x400 + ,LG_STARTING = 0x800 }; static const Uint32 LG_THREAD_MASK = Logfile_group::LG_FORCE_SYNC_THREAD | diff --git a/storage/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp b/storage/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp index 9c02226596e..4db07591b60 100644 --- a/storage/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp +++ b/storage/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp @@ -82,6 +82,7 @@ public: */ struct StartRecord { + StartRecord() {} Uint64 m_startTime; void reset(); @@ -191,7 +192,6 @@ private: void execNDB_STARTCONF(Signal* signal); void execREAD_NODESREQ(Signal* signal); void execNDB_STARTREF(Signal* signal); - void execSET_VAR_REQ(Signal* signal); void execSTOP_PERM_REF(Signal* signal); void execSTOP_PERM_CONF(Signal* signal); diff --git a/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrInit.cpp b/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrInit.cpp index a925eb4beaf..ae5afa7a57b 100644 --- a/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrInit.cpp +++ b/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrInit.cpp @@ -81,7 +81,6 @@ Ndbcntr::Ndbcntr(Block_context& ctx): addRecSignal(GSN_NDB_STARTCONF, &Ndbcntr::execNDB_STARTCONF); addRecSignal(GSN_READ_NODESREQ, &Ndbcntr::execREAD_NODESREQ); addRecSignal(GSN_NDB_STARTREF, &Ndbcntr::execNDB_STARTREF); - addRecSignal(GSN_SET_VAR_REQ, &Ndbcntr::execSET_VAR_REQ); addRecSignal(GSN_STOP_PERM_REF, &Ndbcntr::execSTOP_PERM_REF); addRecSignal(GSN_STOP_PERM_CONF, &Ndbcntr::execSTOP_PERM_CONF); diff --git a/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp b/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp index d3924a586b0..56ecc8ddc39 100644 --- a/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp +++ b/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp @@ -862,7 +862,6 @@ Ndbcntr::trySystemRestart(Signal* signal){ */ const bool allNodes = c_start.m_waiting.equal(c_allDefinedNodes); const bool allClusterNodes = c_start.m_waiting.equal(c_clusterNodes); - const Uint64 now = NdbTick_CurrentMillisecond(); if(!allClusterNodes){ jam(); @@ -1441,7 +1440,6 @@ void Ndbcntr::execNODE_FAILREP(Signal* signal) const bool tMasterFailed = allFailed.get(cmasterNodeId); const bool tStarted = !failedStarted.isclear(); const bool tStarting = !failedStarting.isclear(); - const bool tWaiting = !failedWaiting.isclear(); if(tMasterFailed){ jam(); @@ -1701,6 +1699,7 @@ void Ndbcntr::createSystableLab(Signal* signal, unsigned index) //w.add(DictTabInfo::NoOfVariable, (Uint32)0); //w.add(DictTabInfo::KeyLength, 1); w.add(DictTabInfo::TableTypeVal, (Uint32)table.tableType); + w.add(DictTabInfo::SingleUserMode, (Uint32)NDB_SUM_READ_WRITE); for (unsigned i = 0; i < table.columnCount; i++) { const SysColumn& column = table.columnList[i]; @@ -2085,23 +2084,6 @@ Ndbcntr::execDUMP_STATE_ORD(Signal* signal) }//Ndbcntr::execDUMP_STATE_ORD() -void Ndbcntr::execSET_VAR_REQ(Signal* signal) { -#if 0 - SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0]; - ConfigParamId var = setVarReq->variable(); - - switch (var) { - case TimeToWaitAlive: - // Valid only during start so value not set. 
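A few hunks further on, AsyncFile::closeReq() stops issuing an unconditional sync: the open flags are now recorded in the new m_open_flags member, and the file is synced on close only if it was opened writable. A hedged POSIX sketch of the same idea with hypothetical helper names (the kernel maps its own FsOpenReq::OM_* flags rather than O_* ones):

    #include <fcntl.h>
    #include <unistd.h>

    // Hypothetical handle; the kernel keeps the equivalent m_open_flags
    // on the AsyncFile object, recorded in openReq().
    struct FileHandle {
      int fd;
      int open_flags;  // O_* flags remembered from open time
    };

    int close_with_optional_sync(FileHandle& f)
    {
      // Only descriptors that could have dirtied data need the sync;
      // read-only opens skip it entirely.
      if (f.open_flags & (O_WRONLY | O_RDWR | O_APPEND))
        fsync(f.fd);
      return close(f.fd);
    }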
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB); - break; - - default: - sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB); - }// switch -#endif -}//Ndbcntr::execSET_VAR_REQ() - void Ndbcntr::updateNodeState(Signal* signal, const NodeState& newState) const{ NodeStateRep * const stateRep = (NodeStateRep *)&signal->theData[0]; @@ -2530,8 +2512,6 @@ void Ndbcntr::execABORT_ALL_CONF(Signal* signal){ void Ndbcntr::execABORT_ALL_REF(Signal* signal){ jamEntry(); - AbortAllRef *abortAllRef = (AbortAllRef *)&signal->theData[0]; - AbortAllRef::ErrorCode errorCode = (AbortAllRef::ErrorCode) abortAllRef->errorCode; StopRef * const stopRef = (StopRef *)&signal->theData[0]; stopRef->senderData = c_stopRec.stopReq.senderData; diff --git a/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp b/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp index cf18bf34040..5300d5bbfd9 100644 --- a/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp +++ b/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp @@ -99,6 +99,7 @@ AsyncFile::AsyncFile(SimulatedBlock& fs) : { m_page_ptr.setNull(); m_current_request= m_last_request= 0; + m_open_flags = 0; } void @@ -328,6 +329,7 @@ void AsyncFile::openReq(Request* request) { m_auto_sync_freq = 0; m_write_wo_sync = 0; + m_open_flags = request->par.open.flags; // for open.flags, see signal FSOPENREQ #ifdef NDB_WIN32 @@ -954,7 +956,12 @@ AsyncFile::writevReq( Request * request) void AsyncFile::closeReq(Request * request) { - syncReq(request); + if (m_open_flags & ( + FsOpenReq::OM_WRITEONLY | + FsOpenReq::OM_READWRITE | + FsOpenReq::OM_APPEND )) { + syncReq(request); + } #ifdef NDB_WIN32 if(!CloseHandle(hFile)) { request->error = GetLastError(); diff --git a/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.hpp b/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.hpp index 64567dd2bb8..e4a01753acd 100644 --- a/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.hpp +++ b/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.hpp @@ -106,6 +106,8 @@ class AsyncFile; class Request { public: + Request() {} + enum Action { open, close, @@ -222,6 +224,8 @@ private: #else int theFd; #endif + + Uint32 m_open_flags; // OM_ flags from request to open file MemoryChannel<Request> *theReportTo; MemoryChannel<Request>* theMemoryChannelPtr; diff --git a/storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp b/storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp index 6fb9ef774d0..26bf8878852 100644 --- a/storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp +++ b/storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp @@ -105,11 +105,11 @@ Ndbfs::execREAD_CONFIG_REQ(Signal* signal) theRequestPool = new Pool<Request>; - m_maxFiles = 40; + m_maxFiles = 0; ndb_mgm_get_int_parameter(p, CFG_DB_MAX_OPEN_FILES, &m_maxFiles); Uint32 noIdleFiles = 27; ndb_mgm_get_int_parameter(p, CFG_DB_INITIAL_OPEN_FILES, &noIdleFiles); - if (noIdleFiles > m_maxFiles) + if (noIdleFiles > m_maxFiles && m_maxFiles != 0) m_maxFiles = noIdleFiles; // Create idle AsyncFiles for (Uint32 i = 0; i < noIdleFiles; i++){ @@ -217,6 +217,8 @@ Ndbfs::execFSOPENREQ(Signal* signal) releaseSections(signal); } file->reportTo(&theFromThreads); + if (getenv("NDB_TRACE_OPEN")) + ndbout_c("open(%s)", file->theFileName.c_str()); Request* request = theRequestPool->get(); request->action = Request::open; @@ -650,7 +652,7 @@ AsyncFile* Ndbfs::createAsyncFile(){ // Check limit of open files - if (theFiles.size()+1 == m_maxFiles) { + if (m_maxFiles !=0 && theFiles.size() == m_maxFiles) { // Print info about all open files for (unsigned i = 0; i < theFiles.size(); i++){ AsyncFile* file = 
theFiles[i]; diff --git a/storage/ndb/src/kernel/blocks/pgman.cpp b/storage/ndb/src/kernel/blocks/pgman.cpp index 1f914639d6b..aa1f04c720c 100644 --- a/storage/ndb/src/kernel/blocks/pgman.cpp +++ b/storage/ndb/src/kernel/blocks/pgman.cpp @@ -527,7 +527,6 @@ Pgman::lirs_stack_prune() debugOut << "PGMAN: >lirs_stack_prune" << endl; #endif Page_stack& pl_stack = m_page_stack; - Page_queue& pl_queue = m_page_queue; Ptr<Page_entry> ptr; while (pl_stack.first(ptr)) // first is stack bottom @@ -874,7 +873,6 @@ Pgman::process_bind(Signal* signal, Ptr<Page_entry> ptr) #ifdef VM_TRACE debugOut << "PGMAN: " << ptr << " : process_bind" << endl; #endif - Page_sublist& pl_bind = *m_page_sublist[Page_entry::SL_BIND]; Page_queue& pl_queue = m_page_queue; Ptr<GlobalPage> gptr; diff --git a/storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp b/storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp index 92b8b5d3306..8d51b24ec6a 100644 --- a/storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp +++ b/storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp @@ -105,6 +105,7 @@ public: }; struct StartRecord { + StartRecord() {} void reset(){ m_startKey++; m_startNode = 0; @@ -128,6 +129,7 @@ public: Uint32 m_president_candidate_gci; Uint16 m_regReqReqSent; Uint16 m_regReqReqRecv; + Uint32 m_node_gci[MAX_NDB_NODES]; } c_start; NdbNodeBitmask c_definedNodes; // DB nodes in config @@ -173,6 +175,7 @@ public: }; struct ArbitRec { + ArbitRec() {} ArbitState state; // state bool newstate; // flag to initialize new state unsigned thread; // identifies a continueB "thread" @@ -246,7 +249,6 @@ private: void execAPI_REGREQ(Signal* signal); void execAPI_FAILCONF(Signal* signal); void execREAD_NODESREQ(Signal* signal); - void execSET_VAR_REQ(Signal* signal); void execAPI_FAILREQ(Signal* signal); void execREAD_NODESREF(Signal* signal); diff --git a/storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp b/storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp index 23bbe94f020..f9950072ab4 100644 --- a/storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp +++ b/storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp @@ -90,7 +90,6 @@ Qmgr::Qmgr(Block_context& ctx) addRecSignal(GSN_API_FAILREQ, &Qmgr::execAPI_FAILREQ); addRecSignal(GSN_API_FAILCONF, &Qmgr::execAPI_FAILCONF); addRecSignal(GSN_READ_NODESREQ, &Qmgr::execREAD_NODESREQ); - addRecSignal(GSN_SET_VAR_REQ, &Qmgr::execSET_VAR_REQ); addRecSignal(GSN_API_BROADCAST_REP, &Qmgr::execAPI_BROADCAST_REP); addRecSignal(GSN_NODE_FAILREP, &Qmgr::execNODE_FAILREP); diff --git a/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp b/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp index 3a6f5151fec..1fba4d62e17 100644 --- a/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp +++ b/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp @@ -849,7 +849,6 @@ void Qmgr::execCM_REGCONF(Signal* signal) jamEntry(); const CmRegConf * const cmRegConf = (CmRegConf *)&signal->theData[0]; - Uint32 presidentNodeId = cmRegConf->presidentNodeId; if (!ndbCompatible_ndb_ndb(NDB_VERSION, cmRegConf->presidentVersion)) { jam(); @@ -1093,7 +1092,8 @@ void Qmgr::execCM_REGREF(Signal* signal) jam(); c_start.m_starting_nodes_w_log.set(TaddNodeno); } - + c_start.m_node_gci[TaddNodeno] = node_gci; + skip_nodes.bitAND(c_definedNodes); c_start.m_skip_nodes.bitOR(skip_nodes); @@ -1242,6 +1242,7 @@ Qmgr::check_startup(Signal* signal) wait.bitANDC(tmp); Uint32 retVal = 0; + Uint32 incompleteng = MAX_NDB_NODES; // Illegal value NdbNodeBitmask report_mask; if ((c_start.m_latest_gci == 0) || @@ -1274,7 +1275,6 @@ Qmgr::check_startup(Signal* signal) /** * Check for missing node group directly */ - 
char buf[100]; NdbNodeBitmask check; check.assign(c_definedNodes); check.bitANDC(c_start.m_starting_nodes); // Not connected nodes @@ -1327,7 +1327,7 @@ Qmgr::check_startup(Signal* signal) report_mask.assign(c_definedNodes); report_mask.bitANDC(c_start.m_starting_nodes); retVal = 1; - goto start_report; + goto check_log; case CheckNodeGroups::Partitioning: ndbrequire(result != CheckNodeGroups::Lose); signal->theData[1] = @@ -1335,7 +1335,7 @@ Qmgr::check_startup(Signal* signal) report_mask.assign(c_definedNodes); report_mask.bitANDC(c_start.m_starting_nodes); retVal = 1; - goto start_report; + goto check_log; } } @@ -1359,12 +1359,7 @@ Qmgr::check_startup(Signal* signal) case CheckNodeGroups::Partitioning: if (now < partitioned_timeout && result != CheckNodeGroups::Win) { - signal->theData[1] = c_restartPartionedTimeout == (Uint32) ~0 ? 4 : 5; - signal->theData[2] = Uint32((partitioned_timeout - now + 500) / 1000); - report_mask.assign(c_definedNodes); - report_mask.bitANDC(c_start.m_starting_nodes); - retVal = 0; - goto start_report; + goto missinglog; } // Fall through... case CheckNodeGroups::Win: @@ -1372,12 +1367,61 @@ Qmgr::check_startup(Signal* signal) all ? 0x8001 : (result == CheckNodeGroups::Win ? 0x8002 : 0x8003); report_mask.assign(c_definedNodes); report_mask.bitANDC(c_start.m_starting_nodes); - retVal = 1; - goto start_report; + retVal = 2; + goto check_log; } } ndbrequire(false); +check_log: + jam(); + { + Uint32 save[4+4*NdbNodeBitmask::Size]; + memcpy(save, signal->theData, sizeof(save)); + + signal->theData[0] = 0; + c_start.m_starting_nodes.copyto(NdbNodeBitmask::Size, signal->theData+1); + memcpy(signal->theData+1+NdbNodeBitmask::Size, c_start.m_node_gci, + 4*MAX_NDB_NODES); + EXECUTE_DIRECT(DBDIH, GSN_DIH_RESTARTREQ, signal, + 1+NdbNodeBitmask::Size+MAX_NDB_NODES); + + incompleteng = signal->theData[0]; + memcpy(signal->theData, save, sizeof(save)); + + if (incompleteng != MAX_NDB_NODES) + { + jam(); + if (retVal == 1) + { + jam(); + goto incomplete_log; + } + else if (retVal == 2) + { + if (now <= partitioned_timeout) + { + jam(); + goto missinglog; + } + else + { + goto incomplete_log; + } + } + ndbrequire(false); + } + } + goto start_report; + +missinglog: + signal->theData[1] = c_restartPartionedTimeout == (Uint32) ~0 ? 4 : 5; + signal->theData[2] = Uint32((partitioned_timeout - now + 500) / 1000); + report_mask.assign(c_definedNodes); + report_mask.bitANDC(c_start.m_starting_nodes); + retVal = 0; + goto start_report; + start_report: jam(); { @@ -1396,17 +1440,32 @@ start_report: missing_nodegroup: jam(); - char buf[100], mask1[100], mask2[100]; - c_start.m_starting_nodes.getText(mask1); - tmp.assign(c_start.m_starting_nodes); - tmp.bitANDC(c_start.m_starting_nodes_w_log); - tmp.getText(mask2); - BaseString::snprintf(buf, sizeof(buf), - "Unable to start missing node group! " - " starting: %s (missing fs for: %s)", - mask1, mask2); - progError(__LINE__, NDBD_EXIT_SR_RESTARTCONFLICT, buf); - return 0; // Deadcode + { + char buf[100], mask1[100], mask2[100]; + c_start.m_starting_nodes.getText(mask1); + tmp.assign(c_start.m_starting_nodes); + tmp.bitANDC(c_start.m_starting_nodes_w_log); + tmp.getText(mask2); + BaseString::snprintf(buf, sizeof(buf), + "Unable to start missing node group! 
" + " starting: %s (missing fs for: %s)", + mask1, mask2); + progError(__LINE__, NDBD_EXIT_INSUFFICENT_NODES, buf); + return 0; // Deadcode + } + +incomplete_log: + jam(); + { + char buf[100], mask1[100]; + c_start.m_starting_nodes.getText(mask1); + BaseString::snprintf(buf, sizeof(buf), + "Incomplete log for node group: %d! " + " starting nodes: %s", + incompleteng, mask1); + progError(__LINE__, NDBD_EXIT_INSUFFICENT_NODES, buf); + return 0; // Deadcode + } } void @@ -2734,7 +2793,7 @@ void Qmgr::execAPI_REGREQ(Signal* signal) "incompatible with %s", type == NodeInfo::API ? "api or mysqld" : "management server", apiNodePtr.i, - getVersionString(version,"",buf,sizeof(buf)), + ndbGetVersionString(version,"",buf,sizeof(buf)), NDB_VERSION_STRING); apiNodePtr.p->phase = ZAPI_INACTIVE; sendApiRegRef(signal, ref, ApiRegRef::UnsupportedVersion); @@ -2993,7 +3052,7 @@ void Qmgr::failReportLab(Signal* signal, Uint16 aFailedNode, if (failedNodePtr.i == getOwnNodeId()) { jam(); - Uint32 code = 0; + Uint32 code = NDBD_EXIT_NODE_DECLARED_DEAD; const char * msg = 0; char extra[100]; switch(aFailCause){ @@ -3525,8 +3584,10 @@ void Qmgr::execCOMMIT_FAILREQ(Signal* signal) nodePtr.p->phase = ZFAIL_CLOSING; nodePtr.p->failState = WAITING_FOR_NDB_FAILCONF; setNodeInfo(nodePtr.i).m_heartbeat_cnt= 0; + setNodeInfo(nodePtr.i).m_version = 0; c_clusterNodes.clear(nodePtr.i); }//for + recompute_version_info(NodeInfo::DB); /*----------------------------------------------------------------------*/ /* WE INFORM THE API'S WE HAVE CONNECTED ABOUT THE FAILED NODES. */ /*----------------------------------------------------------------------*/ @@ -4949,34 +5010,6 @@ Qmgr::execDUMP_STATE_ORD(Signal* signal) #endif }//Qmgr::execDUMP_STATE_ORD() -void Qmgr::execSET_VAR_REQ(Signal* signal) -{ -#if 0 - SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0]; - ConfigParamId var = setVarReq->variable(); - UintR val = setVarReq->value(); - - switch (var) { - case HeartbeatIntervalDbDb: - setHbDelay(val/10); - sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB); - break; - - case HeartbeatIntervalDbApi: - setHbApiDelay(val/10); - sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB); - break; - - case ArbitTimeout: - setArbitTimeout(val); - sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB); - break; - - default: - sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB); - }// switch -#endif -}//execSET_VAR_REQ() void Qmgr::execAPI_BROADCAST_REP(Signal* signal) diff --git a/storage/ndb/src/kernel/blocks/restore.cpp b/storage/ndb/src/kernel/blocks/restore.cpp index 2c204b912b1..efc4bc1948a 100644 --- a/storage/ndb/src/kernel/blocks/restore.cpp +++ b/storage/ndb/src/kernel/blocks/restore.cpp @@ -71,8 +71,6 @@ Restore::execSTTOR(Signal* signal) { jamEntry(); - const Uint32 startphase = signal->theData[1]; - const Uint32 typeOfStart = signal->theData[7]; c_lqh = (Dblqh*)globalData.getBlock(DBLQH); c_tup = (Dbtup*)globalData.getBlock(DBTUP); sendSTTORRY(signal); @@ -804,7 +802,6 @@ Restore::parse_table_description(Signal* signal, FilePtr file_ptr, return; } - Uint32 null_offset = 0; Column c; Uint32 colstore[sizeof(Column)/sizeof(Uint32)]; @@ -1274,7 +1271,7 @@ Restore::check_file_version(Signal* signal, Uint32 file_version) { char buf[255]; char verbuf[255]; - getVersionString(file_version, 0, verbuf, sizeof(verbuf)); + ndbGetVersionString(file_version, 0, verbuf, sizeof(verbuf)); BaseString::snprintf(buf, sizeof(buf), "Unsupported version of LCP files found on disk, " " found: %s", verbuf); diff --git 
a/storage/ndb/src/kernel/blocks/suma/Suma.cpp b/storage/ndb/src/kernel/blocks/suma/Suma.cpp index 94df9a2b32e..7845b83693c 100644 --- a/storage/ndb/src/kernel/blocks/suma/Suma.cpp +++ b/storage/ndb/src/kernel/blocks/suma/Suma.cpp @@ -629,6 +629,8 @@ Suma::removeSubscribersOnNode(Signal *signal, Uint32 nodeId) bool found = false; KeyTable<Table>::Iterator it; + LINT_INIT(it.bucket); + LINT_INIT(it.curr.p); for(c_tables.first(it);!it.isNull();c_tables.next(it)) { LocalDLList<Subscriber> subbs(c_subscriberPool,it.curr.p->c_subscribers); @@ -1297,7 +1299,7 @@ Suma::execSUB_SYNC_REQ(Signal* signal) jam(); syncPtr.p->m_tableList.append(&subPtr.p->m_tableId, 1); if(signal->getNoOfSections() > 0){ - SegmentedSectionPtr ptr; + SegmentedSectionPtr ptr(0,0,0); signal->getSection(ptr, SubSyncReq::ATTRIBUTE_LIST); LocalDataBuffer<15> attrBuf(c_dataBufferPool,syncPtr.p->m_attributeList); append(attrBuf, ptr, getSectionSegmentPool()); @@ -1743,7 +1745,7 @@ Suma::execGET_TABINFO_CONF(Signal* signal){ Uint32 tableId = conf->tableId; TablePtr tabPtr; c_tablePool.getPtr(tabPtr, conf->senderData); - SegmentedSectionPtr ptr; + SegmentedSectionPtr ptr(0,0,0); signal->getSection(ptr, GetTabInfoConf::DICT_TAB_INFO); ndbrequire(tabPtr.p->parseTable(ptr, *this)); releaseSections(signal); @@ -3477,6 +3479,7 @@ Suma::execFIRE_TRIG_ORD(Signal* signal) const Uint32 hashValue = trg->getHashValue(); const Uint32 gci = trg->getGCI(); const Uint32 event = trg->getTriggerEvent(); + const Uint32 any_value = trg->getAnyValue(); TablePtr tabPtr; tabPtr.i = trigId & 0xFFFF; @@ -3517,7 +3520,7 @@ Suma::execFIRE_TRIG_ORD(Signal* signal) data->requestInfo = 0; SubTableData::setOperation(data->requestInfo, event); data->logType = 0; - data->changeMask = 0; + data->anyValue = any_value; data->totalLen = ptrLen; { @@ -3535,13 +3538,15 @@ Suma::execFIRE_TRIG_ORD(Signal* signal) } else { + const uint buffer_header_sz = 4; Uint32* dst; - Uint32 sz = f_trigBufferSize + b_trigBufferSize + 3; + Uint32 sz = f_trigBufferSize + b_trigBufferSize + buffer_header_sz; if((dst = get_buffer_ptr(signal, bucket, gci, sz))) { * dst++ = tableId; * dst++ = tabPtr.p->m_schemaVersion; * dst++ = (event << 16) | f_trigBufferSize; + * dst++ = any_value; memcpy(dst, f_buffer, f_trigBufferSize << 2); dst += f_trigBufferSize; memcpy(dst, b_buffer, b_trigBufferSize << 2); @@ -4614,6 +4619,7 @@ Suma::execSUMA_HANDOVER_CONF(Signal* signal) { DBUG_VOID_RETURN; } +#ifdef NOT_USED static NdbOut& operator<<(NdbOut & out, const Suma::Page_pos & pos) @@ -4625,6 +4631,7 @@ operator<<(NdbOut & out, const Suma::Page_pos & pos) << " ]"; return out; } +#endif Uint32* Suma::get_buffer_ptr(Signal* signal, Uint32 buck, Uint32 gci, Uint32 sz) @@ -4716,9 +4723,7 @@ Suma::out_of_buffer(Signal* signal) m_out_of_buffer_gci = m_last_complete_gci - 1; infoEvent("Out of event buffer: nodefailure will cause event failures"); - signal->theData[0] = SumaContinueB::OUT_OF_BUFFER_RELEASE; - signal->theData[1] = 0; - sendSignal(SUMA_REF, GSN_CONTINUEB, signal, 2, JBB); + out_of_buffer_release(signal, 0); } void @@ -4786,7 +4791,8 @@ loop: Uint32 count; m_tup->allocConsPages(16, count, ref); - ndbrequire(count > 0); + if (count == 0) + return RNIL; ndbout_c("alloc_chunk(%d %d) - ", ref, count); @@ -4795,6 +4801,7 @@ loop: ptr.p->m_free = count; Buffer_page* page; + LINT_INIT(page); for(Uint32 i = 0; i<count; i++) { page = (Buffer_page*)m_tup->c_page_pool.getPtr(ref); @@ -5037,18 +5044,20 @@ Suma::resend_bucket(Signal* signal, Uint32 buck, Uint32 min_gci, } else { + const uint buffer_header_sz 
= 4; g_cnt++; Uint32 table = * src++ ; Uint32 schemaVersion = * src++; Uint32 event = * src >> 16; Uint32 sz_1 = (* src ++) & 0xFFFF; - - ndbassert(sz - 3 >= sz_1); + Uint32 any_value = * src++; + + ndbassert(sz - buffer_header_sz >= sz_1); LinearSectionPtr ptr[3]; const Uint32 nptr= reformat(signal, ptr, src, sz_1, - src + sz_1, sz - 3 - sz_1); + src + sz_1, sz - buffer_header_sz - sz_1); Uint32 ptrLen= 0; for(Uint32 i =0; i < nptr; i++) ptrLen+= ptr[i].sz; @@ -5066,7 +5075,7 @@ Suma::resend_bucket(Signal* signal, Uint32 buck, Uint32 min_gci, data->requestInfo = 0; SubTableData::setOperation(data->requestInfo, event); data->logType = 0; - data->changeMask = 0; + data->anyValue = any_value; data->totalLen = ptrLen; { diff --git a/storage/ndb/src/kernel/blocks/suma/Suma.hpp b/storage/ndb/src/kernel/blocks/suma/Suma.hpp index 64f095860ec..675706d5431 100644 --- a/storage/ndb/src/kernel/blocks/suma/Suma.hpp +++ b/storage/ndb/src/kernel/blocks/suma/Suma.hpp @@ -149,6 +149,7 @@ public: */ struct Subscription { + Subscription() {} Uint32 m_senderRef; Uint32 m_senderData; Uint32 m_subscriptionId; diff --git a/storage/ndb/src/kernel/blocks/tsman.cpp b/storage/ndb/src/kernel/blocks/tsman.cpp index 353bfc6e3c5..3a7003d56c8 100644 --- a/storage/ndb/src/kernel/blocks/tsman.cpp +++ b/storage/ndb/src/kernel/blocks/tsman.cpp @@ -126,9 +126,6 @@ Tsman::execSTTOR(Signal* signal) { jamEntry(); - const Uint32 startphase = signal->theData[1]; - const Uint32 typeOfStart = signal->theData[7]; - sendSTTORRY(signal); return; @@ -1200,7 +1197,6 @@ Tsman::scan_extent_headers(Signal* signal, Ptr<Datafile> ptr) Uint32 firstFree= RNIL; Uint32 size = ptr.p->m_extent_size; Uint32 per_page = ptr.p->m_online.m_extent_headers_per_extent_page; - Uint32 SZ= File_formats::Datafile::EXTENT_HEADER_BITMASK_BITS_PER_PAGE; Uint32 pages= ptr.p->m_online.m_offset_data_pages - 1; Uint32 datapages= ptr.p->m_online.m_data_pages; Dbtup* tup= (Dbtup*)globalData.getBlock(DBTUP); @@ -1327,6 +1323,12 @@ Tsman::execDROP_FILE_REQ(Signal* signal) Local_datafile_list free(m_file_pool, fg_ptr.p->m_free_files); free.remove(file_ptr); } + else if(find_file_by_id(file_ptr, fg_ptr.p->m_meta_files, req.file_id)) + { + jam(); + Local_datafile_list meta(m_file_pool, fg_ptr.p->m_meta_files); + meta.remove(file_ptr); + } else { errorCode = DropFileImplRef::NoSuchFile; @@ -1501,6 +1503,12 @@ Tsman::execALLOC_EXTENT_REQ(Signal* signal) { jam(); err = AllocExtentReq::NoExtentAvailable; + Local_datafile_list full_tmp(m_file_pool, ts_ptr.p->m_full_files); + if (tmp.isEmpty() && full_tmp.isEmpty()) + { + jam(); + err = AllocExtentReq::NoDatafile; + } } /** diff --git a/storage/ndb/src/kernel/error/ErrorReporter.cpp b/storage/ndb/src/kernel/error/ErrorReporter.cpp index 3d1b7fad7f3..43307d43139 100644 --- a/storage/ndb/src/kernel/error/ErrorReporter.cpp +++ b/storage/ndb/src/kernel/error/ErrorReporter.cpp @@ -24,6 +24,7 @@ #include <NdbHost.h> #include <NdbConfig.h> #include <Configuration.hpp> +#include "EventLogger.hpp" #include <NdbAutoPtr.hpp> @@ -39,7 +40,7 @@ static void dumpJam(FILE* jamStream, Uint32 thrdTheEmulatedJamIndex, Uint8 thrdTheEmulatedJam[]); - +extern EventLogger g_eventLogger; const char* ErrorReporter::formatTimeStampString(){ TimeModule DateTime; /* To create "theDateTimeString" */ @@ -196,6 +197,9 @@ ErrorReporter::handleError(int messageID, WriteMessage(messageID, problemData, objRef, theEmulatedJamIndex, theEmulatedJam); + g_eventLogger.info(problemData); + g_eventLogger.info(objRef); + childReportError(messageID); if(messageID == 
NDBD_EXIT_ERROR_INSERT){ diff --git a/storage/ndb/src/kernel/error/TimeModule.cpp b/storage/ndb/src/kernel/error/TimeModule.cpp index 1c01f91f86b..2be734842ba 100644 --- a/storage/ndb/src/kernel/error/TimeModule.cpp +++ b/storage/ndb/src/kernel/error/TimeModule.cpp @@ -18,7 +18,7 @@ #include <ndb_global.h> #include "TimeModule.hpp" -static const char* cMonth[] = { "x", "January", "February", "Mars", "April", "May", "June", +static const char* cMonth[] = { "x", "January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}; static const char* cDay[] = { "Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", diff --git a/storage/ndb/src/kernel/error/ndbd_exit_codes.c b/storage/ndb/src/kernel/error/ndbd_exit_codes.c index 14a55816036..b36ea3af8ee 100644 --- a/storage/ndb/src/kernel/error/ndbd_exit_codes.c +++ b/storage/ndb/src/kernel/error/ndbd_exit_codes.c @@ -57,6 +57,8 @@ static const ErrStruct errArray[] = "error(s) on other node(s)"}, {NDBD_EXIT_PARTITIONED_SHUTDOWN, XAE, "Partitioned cluster detected. " "Please check if cluster is already running"}, + {NDBD_EXIT_NODE_DECLARED_DEAD, XAE, + "Node declared dead. See error log for details"}, {NDBD_EXIT_POINTER_NOTINRANGE, XIE, "Pointer too large"}, {NDBD_EXIT_SR_OTHERNODEFAILED, XRE, "Another node failed during system " "restart, please investigate error(s) on other node(s)"}, @@ -160,6 +162,7 @@ static const ErrStruct errArray[] = {NDBD_EXIT_AFS_READ_UNDERFLOW , XFI, "Read underflow"}, {NDBD_EXIT_INVALID_LCP_FILE, XFI, "Invalid LCP" }, + {NDBD_EXIT_INSUFFICENT_NODES, XRE, "Insufficent nodes for system restart" }, /* Sentinel */ {0, XUE, diff --git a/storage/ndb/src/kernel/vm/Configuration.cpp b/storage/ndb/src/kernel/vm/Configuration.cpp index fbda9873fd8..72770d35cde 100644 --- a/storage/ndb/src/kernel/vm/Configuration.cpp +++ b/storage/ndb/src/kernel/vm/Configuration.cpp @@ -74,35 +74,35 @@ static struct my_option my_long_options[] = { "initial", OPT_INITIAL, "Perform initial start of ndbd, including cleaning the file system. " "Consult documentation before using this", - (gptr*) &_initial, (gptr*) &_initial, 0, + (uchar**) &_initial, (uchar**) &_initial, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "nostart", 'n', "Don't start ndbd immediately. 
Ndbd will await command from ndb_mgmd", - (gptr*) &_no_start, (gptr*) &_no_start, 0, + (uchar**) &_no_start, (uchar**) &_no_start, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "daemon", 'd', "Start ndbd as daemon (default)", - (gptr*) &_daemon, (gptr*) &_daemon, 0, + (uchar**) &_daemon, (uchar**) &_daemon, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0 }, { "nodaemon", OPT_NODAEMON, "Do not start ndbd as daemon, provided for testing purposes", - (gptr*) &_no_daemon, (gptr*) &_no_daemon, 0, + (uchar**) &_no_daemon, (uchar**) &_no_daemon, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "foreground", OPT_FOREGROUND, "Run real ndbd in foreground, provided for debugging purposes" " (implies --nodaemon)", - (gptr*) &_foreground, (gptr*) &_foreground, 0, + (uchar**) &_foreground, (uchar**) &_foreground, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "nowait-nodes", OPT_NOWAIT_NODES, "Nodes that will not be waited for during start", - (gptr*) &_nowait_nodes, (gptr*) &_nowait_nodes, 0, + (uchar**) &_nowait_nodes, (uchar**) &_nowait_nodes, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { "initial-start", OPT_INITIAL_START, "Perform initial start", - (gptr*) &_initialstart, (gptr*) &_initialstart, 0, + (uchar**) &_initialstart, (uchar**) &_initialstart, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "bind-address", OPT_NOWAIT_NODES, "Local bind address", - (gptr*) &_bind_address, (gptr*) &_bind_address, 0, + (uchar**) &_bind_address, (uchar**) &_bind_address, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} }; diff --git a/storage/ndb/src/kernel/vm/DynArr256.cpp b/storage/ndb/src/kernel/vm/DynArr256.cpp index 2d9cc6869f4..4e73bb8830b 100644 --- a/storage/ndb/src/kernel/vm/DynArr256.cpp +++ b/storage/ndb/src/kernel/vm/DynArr256.cpp @@ -303,7 +303,6 @@ DynArr256::expand(Uint32 pos) Uint32 idx = 0; Uint32 alloc[5]; Uint32 sz = m_head.m_sz; - Uint32 shl = 0; for (; pos >= g_max_sizes[sz]; sz++); @@ -365,7 +364,6 @@ Uint32 DynArr256::release(ReleaseIterator &iter, Uint32 * retptr) { Uint32 sz = iter.m_sz; - Uint32 pos = iter.m_pos; Uint32 ptrI = iter.m_ptr_i[sz]; Uint32 page_no = ptrI >> DA256_BITS; Uint32 page_idx = ptrI & DA256_MASK; @@ -443,7 +441,6 @@ DynArr256::release(ReleaseIterator &iter, Uint32 * retptr) return 2; } -done: new (&m_head) Head(); return 0; diff --git a/storage/ndb/src/kernel/vm/SimulatedBlock.hpp b/storage/ndb/src/kernel/vm/SimulatedBlock.hpp index a78ee21fb8f..31f219718e5 100644 --- a/storage/ndb/src/kernel/vm/SimulatedBlock.hpp +++ b/storage/ndb/src/kernel/vm/SimulatedBlock.hpp @@ -18,7 +18,7 @@ #include <NdbTick.h> #include <kernel_types.h> -#include <ndb_version.h> +#include <util/version.h> #include <ndb_limits.h> #include "VMSignal.hpp" diff --git a/storage/ndb/src/kernel/vm/ndbd_malloc.cpp b/storage/ndb/src/kernel/vm/ndbd_malloc.cpp index 9386e3c7cd3..21a26ff11d8 100644 --- a/storage/ndb/src/kernel/vm/ndbd_malloc.cpp +++ b/storage/ndb/src/kernel/vm/ndbd_malloc.cpp @@ -22,12 +22,14 @@ #include <stdio.h> #endif +#ifdef TRACE_MALLOC static void xxx(size_t size, size_t *s_m, size_t *s_k, size_t *s_b) { *s_m = size/1024/1024; *s_k = (size - *s_m*1024*1024)/1024; *s_b = size - *s_m*1024*1024-*s_k*1024; } +#endif static Uint64 g_allocated_memory; void *ndbd_malloc(size_t size) diff --git a/storage/ndb/src/kernel/vm/ndbd_malloc_impl.cpp b/storage/ndb/src/kernel/vm/ndbd_malloc_impl.cpp index 059ecd81c4d..70637a362d0 100644 --- a/storage/ndb/src/kernel/vm/ndbd_malloc_impl.cpp +++ b/storage/ndb/src/kernel/vm/ndbd_malloc_impl.cpp @@ -220,7 +220,7 @@ 
Ndbd_mem_manager::init(bool alloc_less_memory) while (cnt < MAX_CHUNKS && allocated < pages) { InitChunk chunk; - Uint32 remaining = pages - allocated; + LINT_INIT(chunk.m_start); #if defined(_lint) || defined(FORCE_INIT_OF_VARS) memset((char*) &chunk, 0 , sizeof(chunk)); diff --git a/storage/ndb/src/libndb.ver.in b/storage/ndb/src/libndb.ver.in new file mode 100644 index 00000000000..72bf93d196f --- /dev/null +++ b/storage/ndb/src/libndb.ver.in @@ -0,0 +1,2 @@ +libndbclient_@NDB_SHARED_LIB_MAJOR_VERSION@ { global: *; }; + diff --git a/storage/ndb/src/mgmapi/LocalConfig.cpp b/storage/ndb/src/mgmapi/LocalConfig.cpp index f01b6ff3da3..476e2d6dd84 100644 --- a/storage/ndb/src/mgmapi/LocalConfig.cpp +++ b/storage/ndb/src/mgmapi/LocalConfig.cpp @@ -73,9 +73,9 @@ LocalConfig::init(const char *connectString, //4. Check Ndb.cfg in NDB_HOME { bool fopenError; - char *buf= NdbConfig_NdbCfgName(1 /*true*/); - NdbAutoPtr<char> tmp_aptr(buf); - if(readFile(buf, fopenError)) + char *buf2= NdbConfig_NdbCfgName(1 /*true*/); + NdbAutoPtr<char> tmp_aptr(buf2); + if(readFile(buf2, fopenError)) DBUG_RETURN(true); if (!fopenError) DBUG_RETURN(false); @@ -84,9 +84,9 @@ LocalConfig::init(const char *connectString, //5. Check Ndb.cfg in cwd { bool fopenError; - char *buf= NdbConfig_NdbCfgName(0 /*false*/); - NdbAutoPtr<char> tmp_aptr(buf); - if(readFile(buf, fopenError)) + char *buf2= NdbConfig_NdbCfgName(0 /*false*/); + NdbAutoPtr<char> tmp_aptr(buf2); + if(readFile(buf2, fopenError)) DBUG_RETURN(true); if (!fopenError) DBUG_RETURN(false); @@ -94,9 +94,9 @@ LocalConfig::init(const char *connectString, //7. Check { - char buf[256]; - BaseString::snprintf(buf, sizeof(buf), "host=localhost:%s", NDB_PORT); - if(readConnectString(buf, "default connect string")) + char buf2[256]; + BaseString::snprintf(buf2, sizeof(buf2), "host=localhost:%s", NDB_PORT); + if(readConnectString(buf2, "default connect string")) DBUG_RETURN(true); } diff --git a/storage/ndb/src/mgmapi/Makefile.am b/storage/ndb/src/mgmapi/Makefile.am index 518dac8b3ef..96ee65d7d9e 100644 --- a/storage/ndb/src/mgmapi/Makefile.am +++ b/storage/ndb/src/mgmapi/Makefile.am @@ -13,13 +13,16 @@ # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA +MYSQLCLUSTERdir= . 
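The LINT_INIT() calls added above (the KeyTable iterator in Suma.cpp, the InitChunk in Ndbd_mem_manager::init()) exist only to silence false "may be used uninitialized" warnings from lint-style checkers; in normal builds they compile to nothing. A rough sketch of the conventional my_global.h definition this relies on, keyed off the same FORCE_INIT_OF_VARS flag visible in the hunk above (a sketch, not the verbatim header):

    #if defined(_lint) || defined(FORCE_INIT_OF_VARS)
    #define LINT_INIT(var) var= 0   /* force-initialize for checkers/debug builds */
    #else
    #define LINT_INIT(var)          /* no-op in production builds */
    #endif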
+ noinst_LTLIBRARIES = libmgmapi.la -libmgmapi_la_SOURCES = mgmapi.cpp ndb_logevent.cpp mgmapi_configuration.cpp LocalConfig.cpp ../kernel/error/ndbd_exit_codes.c ../mgmsrv/ParamInfo.cpp +libmgmapi_la_SOURCES = mgmapi.cpp ndb_logevent.cpp mgmapi_configuration.cpp LocalConfig.cpp ../kernel/error/ndbd_exit_codes.c ../mgmsrv/ConfigInfo.cpp INCLUDES_LOC = -I$(top_srcdir)/storage/ndb/include/mgmapi -DEFS_LOC = -DNO_DEBUG_MESSAGES -DNDB_PORT="\"@ndb_port@\"" +DEFS_LOC = -DNDB_MGMAPI -DMYSQLCLUSTERDIR="\"$(MYSQLCLUSTERdir)\"" \ + -DNO_DEBUG_MESSAGES -DNDB_PORT="\"@ndb_port@\"" include $(top_srcdir)/storage/ndb/config/common.mk.am include $(top_srcdir)/storage/ndb/config/type_util.mk.am diff --git a/storage/ndb/src/mgmapi/mgmapi.cpp b/storage/ndb/src/mgmapi/mgmapi.cpp index 486d7fec1a1..662e5c22f48 100644 --- a/storage/ndb/src/mgmapi/mgmapi.cpp +++ b/storage/ndb/src/mgmapi/mgmapi.cpp @@ -91,9 +91,7 @@ struct ndb_mgm_handle { int last_error; int last_error_line; char last_error_desc[NDB_MGM_MAX_ERR_DESC_SIZE]; - int read_timeout; - int write_timeout; - unsigned int connect_timeout; + unsigned int timeout; NDB_SOCKET_TYPE socket; @@ -137,18 +135,41 @@ setError(NdbMgmHandle h, int error, int error_line, const char * msg, ...){ return ret; \ } -#define CHECK_REPLY(reply, ret) \ +#define CHECK_REPLY(handle, reply, ret) \ if(reply == NULL) { \ - SET_ERROR(handle, NDB_MGM_ILLEGAL_SERVER_REPLY, ""); \ + if(!handle->last_error) \ + SET_ERROR(handle, NDB_MGM_ILLEGAL_SERVER_REPLY, ""); \ return ret; \ } -#define DBUG_CHECK_REPLY(reply, ret) \ +#define DBUG_CHECK_REPLY(handle, reply, ret) \ if (reply == NULL) { \ - SET_ERROR(handle, NDB_MGM_ILLEGAL_SERVER_REPLY, ""); \ + if(!handle->last_error) \ + SET_ERROR(handle, NDB_MGM_ILLEGAL_SERVER_REPLY, ""); \ DBUG_RETURN(ret); \ } +#define CHECK_TIMEDOUT(in, out) \ + if(in.timedout() || out.timedout()) \ + SET_ERROR(handle, ETIMEDOUT, \ + "Time out talking to management server"); + +#define CHECK_TIMEDOUT_RET(h, in, out, ret) \ + if(in.timedout() || out.timedout()) { \ + SET_ERROR(handle, ETIMEDOUT, \ + "Time out talking to management server"); \ + ndb_mgm_disconnect_quiet(h); \ + return ret; \ + } + +#define DBUG_CHECK_TIMEDOUT_RET(h, in, out, ret) \ + if(in.timedout() || out.timedout()) { \ + SET_ERROR(handle, ETIMEDOUT, \ + "Time out talking to management server"); \ + ndb_mgm_disconnect_quiet(h); \ + DBUG_RETURN(ret); \ + } + /***************************************************************************** * Handles *****************************************************************************/ @@ -164,9 +185,7 @@ ndb_mgm_create_handle() h->last_error = 0; h->last_error_line = 0; h->socket = NDB_INVALID_SOCKET; - h->read_timeout = 50000; - h->write_timeout = 100; - h->connect_timeout = 0; + h->timeout = 60000; h->cfg_i = -1; h->errstream = stdout; h->m_name = 0; @@ -323,8 +342,8 @@ ndb_mgm_call(NdbMgmHandle handle, const ParserRow<ParserDummy> *command_reply, DBUG_ENTER("ndb_mgm_call"); DBUG_PRINT("enter",("handle->socket: %d, cmd: %s", handle->socket, cmd)); - SocketOutputStream out(handle->socket); - SocketInputStream in(handle->socket, handle->read_timeout); + SocketOutputStream out(handle->socket, handle->timeout); + SocketInputStream in(handle->socket, handle->timeout); out.println(cmd); #ifdef MGMAPI_LOG @@ -375,6 +394,8 @@ ndb_mgm_call(NdbMgmHandle handle, const ParserRow<ParserDummy> *command_reply, } out.println(""); + DBUG_CHECK_TIMEDOUT_RET(handle, in, out, NULL); + Parser_t::Context ctx; ParserDummy session(handle->socket); Parser_t parser(command_reply, 
in, true, true, true); @@ -382,14 +403,17 @@ ndb_mgm_call(NdbMgmHandle handle, const ParserRow<ParserDummy> *command_reply, const Properties* p = parser.parse(ctx, session); if (p == NULL){ if(!ndb_mgm_is_connected(handle)) { + DBUG_CHECK_TIMEDOUT_RET(handle, in, out, NULL); DBUG_RETURN(NULL); } else { + DBUG_CHECK_TIMEDOUT_RET(handle, in, out, NULL); if(ctx.m_status==Parser_t::Eof || ctx.m_status==Parser_t::NoLine) { ndb_mgm_disconnect(handle); + DBUG_CHECK_TIMEDOUT_RET(handle, in, out, NULL); DBUG_RETURN(NULL); } /** @@ -411,6 +435,10 @@ ndb_mgm_call(NdbMgmHandle handle, const ParserRow<ParserDummy> *command_reply, p->print(handle->logfile, "IN: "); } #endif + + if(p && (in.timedout() || out.timedout())) + delete p; + DBUG_CHECK_TIMEDOUT_RET(handle, in, out, NULL); DBUG_RETURN(p); } @@ -437,13 +465,36 @@ int ndb_mgm_is_connected(NdbMgmHandle handle) extern "C" int ndb_mgm_set_connect_timeout(NdbMgmHandle handle, unsigned int seconds) { + return ndb_mgm_set_timeout(handle, seconds*1000); + return 0; +} + +extern "C" +int ndb_mgm_set_timeout(NdbMgmHandle handle, unsigned int timeout_ms) +{ if(!handle) return -1; - handle->connect_timeout= seconds; + handle->timeout= timeout_ms; return 0; } +extern "C" +int ndb_mgm_number_of_mgmd_in_connect_string(NdbMgmHandle handle) +{ + int count=0; + Uint32 i; + LocalConfig &cfg= handle->cfg; + + for (i = 0; i < cfg.ids.size(); i++) + { + if (cfg.ids[i].type != MgmId_TCP) + continue; + count++; + } + return count; +} + /** * Connect to a management server */ @@ -472,9 +523,8 @@ ndb_mgm_connect(NdbMgmHandle handle, int no_retries, LocalConfig &cfg= handle->cfg; NDB_SOCKET_TYPE sockfd= NDB_INVALID_SOCKET; Uint32 i; - int binderror = 0; SocketClient s(0, 0); - s.set_connect_timeout(handle->connect_timeout); + s.set_connect_timeout((handle->timeout+999)/1000); if (!s.init()) { fprintf(handle->errstream, @@ -595,6 +645,22 @@ ndb_mgm_get_fd(NdbMgmHandle handle) } /** + * Disconnect from mgm server without error checking + * Should be used internally only. + * e.g. 
on timeout, we leave NdbMgmHandle disconnected + */ +extern "C" +int +ndb_mgm_disconnect_quiet(NdbMgmHandle handle) +{ + NDB_CLOSE_SOCKET(handle->socket); + handle->socket = NDB_INVALID_SOCKET; + handle->connected = 0; + + return 0; +} + +/** * Disconnect from a mgm server */ extern "C" @@ -605,11 +671,7 @@ ndb_mgm_disconnect(NdbMgmHandle handle) CHECK_HANDLE(handle, -1); CHECK_CONNECTED(handle, -1); - NDB_CLOSE_SOCKET(handle->socket); - handle->socket = NDB_INVALID_SOCKET; - handle->connected = 0; - - return 0; + return ndb_mgm_disconnect_quiet(handle); } struct ndb_mgm_type_atoi @@ -772,24 +834,30 @@ ndb_mgm_get_status(NdbMgmHandle handle) CHECK_HANDLE(handle, NULL); CHECK_CONNECTED(handle, NULL); - SocketOutputStream out(handle->socket); - SocketInputStream in(handle->socket, handle->read_timeout); + SocketOutputStream out(handle->socket, handle->timeout); + SocketInputStream in(handle->socket, handle->timeout); out.println("get status"); out.println(""); + CHECK_TIMEDOUT_RET(handle, in, out, NULL); + char buf[1024]; if(!in.gets(buf, sizeof(buf))) { + CHECK_TIMEDOUT_RET(handle, in, out, NULL); SET_ERROR(handle, NDB_MGM_ILLEGAL_SERVER_REPLY, "Probably disconnected"); return NULL; } if(strcmp("node status\n", buf) != 0) { + CHECK_TIMEDOUT_RET(handle, in, out, NULL); + ndbout << in.timedout() << " " << out.timedout() << buf << endl; SET_ERROR(handle, NDB_MGM_ILLEGAL_NODE_STATUS, buf); return NULL; } if(!in.gets(buf, sizeof(buf))) { + CHECK_TIMEDOUT_RET(handle, in, out, NULL); SET_ERROR(handle, NDB_MGM_ILLEGAL_SERVER_REPLY, "Probably disconnected"); return NULL; } @@ -798,6 +866,7 @@ ndb_mgm_get_status(NdbMgmHandle handle) Vector<BaseString> split; tmp.split(split, ":"); if(split.size() != 2){ + CHECK_TIMEDOUT_RET(handle, in, out, NULL); SET_ERROR(handle, NDB_MGM_ILLEGAL_NODE_STATUS, buf); return NULL; } @@ -832,8 +901,12 @@ ndb_mgm_get_status(NdbMgmHandle handle) if(!in.gets(buf, sizeof(buf))) { free(state); - SET_ERROR(handle, NDB_MGM_ILLEGAL_SERVER_REPLY, - "Probably disconnected"); + if(in.timedout() || out.timedout()) + SET_ERROR(handle, ETIMEDOUT, + "Time out talking to management server"); + else + SET_ERROR(handle, NDB_MGM_ILLEGAL_SERVER_REPLY, + "Probably disconnected"); return NULL; } tmp.assign(buf); @@ -842,12 +915,12 @@ ndb_mgm_get_status(NdbMgmHandle handle) break; } - Vector<BaseString> split; - tmp.split(split, ":.", 4); - if(split.size() != 4) + Vector<BaseString> split2; + tmp.split(split2, ":.", 4); + if(split2.size() != 4) break; - const int id = atoi(split[1].c_str()); + const int id = atoi(split2[1].c_str()); if(id != nodeId){ ptr++; i++; @@ -855,15 +928,16 @@ ndb_mgm_get_status(NdbMgmHandle handle) ptr->node_id = id; } - split[3].trim(" \t\n"); + split2[3].trim(" \t\n"); - if(status_ackumulate(ptr,split[2].c_str(), split[3].c_str()) != 0) { + if(status_ackumulate(ptr,split2[2].c_str(), split2[3].c_str()) != 0) { break; } } if(i+1 != noOfNodes){ free(state); + CHECK_TIMEDOUT_RET(handle, in, out, NULL); SET_ERROR(handle, NDB_MGM_ILLEGAL_NODE_STATUS, "Node count mismatch"); return NULL; } @@ -892,7 +966,7 @@ ndb_mgm_enter_single_user(NdbMgmHandle handle, args.put("nodeId", nodeId); const Properties *reply; reply = ndb_mgm_call(handle, enter_single_reply, "enter single user", &args); - CHECK_REPLY(reply, -1); + CHECK_REPLY(handle, reply, -1); BaseString result; reply->get("result", result); @@ -923,7 +997,7 @@ ndb_mgm_exit_single_user(NdbMgmHandle handle, struct ndb_mgm_reply* /*reply*/) const Properties *reply; reply = ndb_mgm_call(handle, exit_single_reply, "exit single 
user", 0); - CHECK_REPLY(reply, -1); + CHECK_REPLY(handle, reply, -1); const char * buf; reply->get("result", &buf); @@ -1020,7 +1094,7 @@ ndb_mgm_stop3(NdbMgmHandle handle, int no_of_nodes, const int * node_list, reply = ndb_mgm_call(handle, stop_reply_v2, "stop all", &args); else reply = ndb_mgm_call(handle, stop_reply_v1, "stop all", &args); - CHECK_REPLY(reply, -1); + CHECK_REPLY(handle, reply, -1); if(!reply->get("stopped", &stoppedNoOfNodes)){ SET_ERROR(handle, NDB_MGM_STOP_FAILED, @@ -1062,7 +1136,7 @@ ndb_mgm_stop3(NdbMgmHandle handle, int no_of_nodes, const int * node_list, else reply = ndb_mgm_call(handle, stop_reply_v1, "stop", &args); - CHECK_REPLY(reply, stoppedNoOfNodes); + CHECK_REPLY(handle, reply, stoppedNoOfNodes); if(!reply->get("stopped", &stoppedNoOfNodes)){ SET_ERROR(handle, NDB_MGM_STOP_FAILED, "Could not get number of stopped nodes from mgm server"); @@ -1161,11 +1235,11 @@ ndb_mgm_restart3(NdbMgmHandle handle, int no_of_nodes, const int * node_list, args.put("initialstart", initial); args.put("nostart", nostart); const Properties *reply; - const int timeout = handle->read_timeout; - handle->read_timeout= 5*60*1000; // 5 minutes + const int timeout = handle->timeout; + handle->timeout= 5*60*1000; // 5 minutes reply = ndb_mgm_call(handle, restart_reply_v1, "restart all", &args); - handle->read_timeout= timeout; - CHECK_REPLY(reply, -1); + handle->timeout= timeout; + CHECK_REPLY(handle, reply, -1); BaseString result; reply->get("result", result); @@ -1197,13 +1271,13 @@ ndb_mgm_restart3(NdbMgmHandle handle, int no_of_nodes, const int * node_list, args.put("nostart", nostart); const Properties *reply; - const int timeout = handle->read_timeout; - handle->read_timeout= 5*60*1000; // 5 minutes + const int timeout = handle->timeout; + handle->timeout= 5*60*1000; // 5 minutes if(use_v2) reply = ndb_mgm_call(handle, restart_reply_v2, "restart node v2", &args); else reply = ndb_mgm_call(handle, restart_reply_v1, "restart node", &args); - handle->read_timeout= timeout; + handle->timeout= timeout; if(reply != NULL) { BaseString result; reply->get("result", result); @@ -1292,7 +1366,7 @@ ndb_mgm_get_clusterlog_severity_filter(NdbMgmHandle handle, Properties args; const Properties *reply; reply = ndb_mgm_call(handle, getinfo_reply, "get info clusterlog", &args); - CHECK_REPLY(reply, -1); + CHECK_REPLY(handle, reply, -1); for(unsigned int i=0; i < severity_size; i++) { reply->get(clusterlog_severity_names[severity[i].category], &severity[i].value); @@ -1323,7 +1397,7 @@ ndb_mgm_get_clusterlog_severity_filter_old(NdbMgmHandle handle) Properties args; const Properties *reply; reply = ndb_mgm_call(handle, getinfo_reply, "get info clusterlog", &args); - CHECK_REPLY(reply, NULL); + CHECK_REPLY(handle, reply, NULL); for(int i=0; i < (int)NDB_MGM_EVENT_SEVERITY_ALL; i++) { reply->get(clusterlog_severity_names[i], &enabled[i]); @@ -1355,7 +1429,7 @@ ndb_mgm_set_clusterlog_severity_filter(NdbMgmHandle handle, const Properties *reply; reply = ndb_mgm_call(handle, filter_reply, "set logfilter", &args); - CHECK_REPLY(reply, retval); + CHECK_REPLY(handle, reply, retval); BaseString result; reply->get("result", result); @@ -1449,7 +1523,7 @@ ndb_mgm_get_clusterlog_loglevel(NdbMgmHandle handle, Properties args; const Properties *reply; reply = ndb_mgm_call(handle, getloglevel_reply, "get cluster loglevel", &args); - CHECK_REPLY(reply, -1); + CHECK_REPLY(handle, reply, -1); for(int i=0; i < loglevel_count; i++) { reply->get(clusterlog_names[loglevel[i].category], &loglevel[i].value); @@ -1485,7 
+1559,7 @@ ndb_mgm_get_clusterlog_loglevel_old(NdbMgmHandle handle) Properties args; const Properties *reply; reply = ndb_mgm_call(handle, getloglevel_reply, "get cluster loglevel", &args); - CHECK_REPLY(reply, NULL); + CHECK_REPLY(handle, reply, NULL); for(int i=0; i < loglevel_count; i++) { reply->get(clusterlog_names[i], &loglevel[i]); @@ -1518,7 +1592,7 @@ ndb_mgm_set_clusterlog_loglevel(NdbMgmHandle handle, int nodeId, const Properties *reply; reply = ndb_mgm_call(handle, clusterlog_reply, "set cluster loglevel", &args); - CHECK_REPLY(reply, -1); + CHECK_REPLY(handle, reply, -1); DBUG_ENTER("ndb_mgm_set_clusterlog_loglevel"); DBUG_PRINT("enter",("node=%d, category=%d, level=%d", nodeId, cat, level)); @@ -1556,7 +1630,7 @@ ndb_mgm_set_loglevel_node(NdbMgmHandle handle, int nodeId, args.put("level", level); const Properties *reply; reply = ndb_mgm_call(handle, loglevel_reply, "set loglevel", &args); - CHECK_REPLY(reply, -1); + CHECK_REPLY(handle, reply, -1); BaseString result; reply->get("result", result); @@ -1615,7 +1689,7 @@ ndb_mgm_listen_event_internal(NdbMgmHandle handle, const int filter[], if(reply == NULL) { close(sockfd); - CHECK_REPLY(reply, -1); + CHECK_REPLY(handle, reply, -1); } delete reply; return sockfd; @@ -1659,7 +1733,7 @@ ndb_mgm_dump_state(NdbMgmHandle handle, int nodeId, const int * _args, const Properties *prop; prop = ndb_mgm_call(handle, dump_state_reply, "dump state", &args); - CHECK_REPLY(prop, -1); + CHECK_REPLY(handle, prop, -1); BaseString result; prop->get("result", result); @@ -1696,6 +1770,7 @@ ndb_mgm_start_signallog(NdbMgmHandle handle, int nodeId, start_signallog_reply, "start signallog", &args); + CHECK_REPLY(handle, prop, -1); if(prop != NULL) { BaseString result; @@ -1732,6 +1807,7 @@ ndb_mgm_stop_signallog(NdbMgmHandle handle, int nodeId, const Properties *prop; prop = ndb_mgm_call(handle, stop_signallog_reply, "stop signallog", &args); + CHECK_REPLY(handle, prop, -1); if(prop != NULL) { BaseString result; @@ -1796,6 +1872,7 @@ ndb_mgm_log_signals(NdbMgmHandle handle, int nodeId, const Properties *prop; prop = ndb_mgm_call(handle, stop_signallog_reply, "log signals", &args); + CHECK_REPLY(handle, prop, -1); if(prop != NULL) { BaseString result; @@ -1833,6 +1910,7 @@ ndb_mgm_set_trace(NdbMgmHandle handle, int nodeId, int traceNumber, const Properties *prop; prop = ndb_mgm_call(handle, set_trace_reply, "set trace", &args); + CHECK_REPLY(handle, prop, -1); if(prop != NULL) { BaseString result; @@ -1870,6 +1948,7 @@ ndb_mgm_insert_error(NdbMgmHandle handle, int nodeId, int errorCode, const Properties *prop; prop = ndb_mgm_call(handle, insert_error_reply, "insert error", &args); + CHECK_REPLY(handle, prop, -1); if(prop != NULL) { BaseString result; @@ -1910,7 +1989,7 @@ ndb_mgm_start(NdbMgmHandle handle, int no_of_nodes, const int * node_list) Properties args; const Properties *reply; reply = ndb_mgm_call(handle, start_reply, "start all", &args); - CHECK_REPLY(reply, -1); + CHECK_REPLY(handle, reply, -1); Uint32 count = 0; if(!reply->get("started", &count)){ @@ -1968,15 +2047,15 @@ ndb_mgm_start_backup(NdbMgmHandle handle, int wait_completed, args.put("completed", wait_completed); const Properties *reply; { // start backup can take some time, set timeout high - Uint64 old_timeout= handle->read_timeout; + Uint64 old_timeout= handle->timeout; if (wait_completed == 2) - handle->read_timeout= 48*60*60*1000; // 48 hours + handle->timeout= 48*60*60*1000; // 48 hours else if (wait_completed == 1) - handle->read_timeout= 10*60*1000; // 10 minutes + 
handle->timeout= 10*60*1000; // 10 minutes reply = ndb_mgm_call(handle, start_backup_reply, "start backup", &args); - handle->read_timeout= old_timeout; + handle->timeout= old_timeout; } - CHECK_REPLY(reply, -1); + CHECK_REPLY(handle, reply, -1); BaseString result; reply->get("result", result); @@ -2010,7 +2089,7 @@ ndb_mgm_abort_backup(NdbMgmHandle handle, unsigned int backupId, const Properties *prop; prop = ndb_mgm_call(handle, stop_backup_reply, "abort backup", &args); - CHECK_REPLY(prop, -1); + CHECK_REPLY(handle, prop, -1); const char * buf; prop->get("result", &buf); @@ -2027,7 +2106,7 @@ ndb_mgm_abort_backup(NdbMgmHandle handle, unsigned int backupId, extern "C" struct ndb_mgm_configuration * ndb_mgm_get_configuration(NdbMgmHandle handle, unsigned int version) { - + SET_ERROR(handle, NDB_MGM_NO_ERROR, "Executing: ndb_mgm_get_configuration"); CHECK_HANDLE(handle, 0); CHECK_CONNECTED(handle, 0); @@ -2045,7 +2124,7 @@ ndb_mgm_get_configuration(NdbMgmHandle handle, unsigned int version) { const Properties *prop; prop = ndb_mgm_call(handle, reply, "get config", &args); - CHECK_REPLY(prop, 0); + CHECK_REPLY(handle, prop, 0); do { const char * buf; @@ -2081,10 +2160,15 @@ ndb_mgm_get_configuration(NdbMgmHandle handle, unsigned int version) { int read = 0; size_t start = 0; do { - if((read = read_socket(handle->socket, handle->read_timeout, - &buf64[start], len-start)) == -1){ - delete[] buf64; + if((read = read_socket(handle->socket, handle->timeout, + &buf64[start], len-start)) < 1){ + delete[] buf64; buf64 = 0; + if(read==0) + SET_ERROR(handle, ETIMEDOUT, "Timeout reading packed config"); + else + SET_ERROR(handle, errno, "Error reading packed config"); + ndb_mgm_disconnect_quiet(handle); break; } start += read; @@ -2205,7 +2289,7 @@ ndb_mgm_alloc_nodeid(NdbMgmHandle handle, unsigned int version, int nodetype, const Properties *prop; prop= ndb_mgm_call(handle, reply, "get nodeid", &args); - CHECK_REPLY(prop, -1); + CHECK_REPLY(handle, prop, -1); nodeid= -1; do { @@ -2234,43 +2318,6 @@ ndb_mgm_alloc_nodeid(NdbMgmHandle handle, unsigned int version, int nodetype, return nodeid; } -/***************************************************************************** - * Global Replication - ******************************************************************************/ -extern "C" -int -ndb_mgm_rep_command(NdbMgmHandle handle, unsigned int request, - unsigned int* replication_id, - struct ndb_mgm_reply* /*reply*/) -{ - SET_ERROR(handle, NDB_MGM_NO_ERROR, "Executing: ndb_mgm_rep_command"); - const ParserRow<ParserDummy> replication_reply[] = { - MGM_CMD("global replication reply", NULL, ""), - MGM_ARG("result", String, Mandatory, "Error message"), - MGM_ARG("id", Int, Optional, "Id of global replication"), - MGM_END() - }; - CHECK_HANDLE(handle, -1); - CHECK_CONNECTED(handle, -1); - - Properties args; - args.put("request", request); - const Properties *reply; - reply = ndb_mgm_call(handle, replication_reply, "rep", &args); - CHECK_REPLY(reply, -1); - - const char * result; - reply->get("result", &result); - reply->get("id", replication_id); - if(strcmp(result,"Ok")!=0) { - delete reply; - return -1; - } - - delete reply; - return 0; -} - extern "C" int ndb_mgm_set_int_parameter(NdbMgmHandle handle, @@ -2294,7 +2341,7 @@ ndb_mgm_set_int_parameter(NdbMgmHandle handle, const Properties *prop; prop= ndb_mgm_call(handle, reply, "set parameter", &args); - CHECK_REPLY(prop, -1); + CHECK_REPLY(handle, prop, -1); int res= -1; do { @@ -2333,7 +2380,8 @@ ndb_mgm_set_int64_parameter(NdbMgmHandle handle, const 
Properties *prop; prop= ndb_mgm_call(handle, reply, "set parameter", &args); - + CHECK_REPLY(handle, prop, 0); + if(prop == NULL) { SET_ERROR(handle, EIO, "Unable set parameter"); return -1; @@ -2376,6 +2424,7 @@ ndb_mgm_set_string_parameter(NdbMgmHandle handle, const Properties *prop; prop= ndb_mgm_call(handle, reply, "set parameter", &args); + CHECK_REPLY(handle, prop, 0); if(prop == NULL) { SET_ERROR(handle, EIO, "Unable set parameter"); @@ -2413,7 +2462,8 @@ ndb_mgm_purge_stale_sessions(NdbMgmHandle handle, char **purged){ const Properties *prop; prop= ndb_mgm_call(handle, reply, "purge stale sessions", &args); - + CHECK_REPLY(handle, prop, -1); + if(prop == NULL) { SET_ERROR(handle, EIO, "Unable to purge stale sessions"); return -1; @@ -2443,8 +2493,8 @@ int ndb_mgm_check_connection(NdbMgmHandle handle){ CHECK_HANDLE(handle, 0); CHECK_CONNECTED(handle, 0); - SocketOutputStream out(handle->socket); - SocketInputStream in(handle->socket, handle->read_timeout); + SocketOutputStream out(handle->socket, handle->timeout); + SocketInputStream in(handle->socket, handle->timeout); char buf[32]; if (out.println("check connection")) goto ndb_mgm_check_connection_error; @@ -2498,7 +2548,7 @@ ndb_mgm_set_connection_int_parameter(NdbMgmHandle handle, const Properties *prop; prop= ndb_mgm_call(handle, reply, "set connection parameter", &args); - DBUG_CHECK_REPLY(prop, -1); + DBUG_CHECK_REPLY(handle, prop, -1); int res= -1; do { @@ -2540,7 +2590,7 @@ ndb_mgm_get_connection_int_parameter(NdbMgmHandle handle, const Properties *prop; prop = ndb_mgm_call(handle, reply, "get connection parameter", &args); - DBUG_CHECK_REPLY(prop, -3); + DBUG_CHECK_REPLY(handle, prop, -3); int res= -1; do { @@ -2573,7 +2623,7 @@ ndb_mgm_convert_to_transporter(NdbMgmHandle *handle) (*handle)->connected= 0; // we pretend we're disconnected s= (*handle)->socket; - SocketOutputStream s_output(s); + SocketOutputStream s_output(s, (*handle)->timeout); s_output.println("transporter connect"); s_output.println(""); @@ -2602,7 +2652,7 @@ ndb_mgm_get_mgmd_nodeid(NdbMgmHandle handle) const Properties *prop; prop = ndb_mgm_call(handle, reply, "get mgmd nodeid", &args); - DBUG_CHECK_REPLY(prop, 0); + DBUG_CHECK_REPLY(handle, prop, 0); if(!prop->get("nodeid",&nodeid)){ fprintf(handle->errstream, "Unable to get value\n"); @@ -2637,7 +2687,7 @@ int ndb_mgm_report_event(NdbMgmHandle handle, Uint32 *data, Uint32 length) const Properties *prop; prop = ndb_mgm_call(handle, reply, "report event", &args); - DBUG_CHECK_REPLY(prop, -1); + DBUG_CHECK_REPLY(handle, prop, -1); DBUG_RETURN(0); } @@ -2649,13 +2699,14 @@ int ndb_mgm_end_session(NdbMgmHandle handle) CHECK_CONNECTED(handle, 0); DBUG_ENTER("ndb_mgm_end_session"); - SocketOutputStream s_output(handle->socket); + SocketOutputStream s_output(handle->socket, handle->timeout); s_output.println("end session"); s_output.println(""); - SocketInputStream in(handle->socket, handle->read_timeout); + SocketInputStream in(handle->socket, handle->timeout); char buf[32]; in.gets(buf, sizeof(buf)); + CHECK_TIMEDOUT_RET(handle, in, s_output, -1); DBUG_RETURN(0); } @@ -2681,7 +2732,7 @@ int ndb_mgm_get_version(NdbMgmHandle handle, const Properties *prop; prop = ndb_mgm_call(handle, reply, "get version", &args); - CHECK_REPLY(prop, 0); + CHECK_REPLY(handle, prop, 0); Uint32 id; if(!prop->get("id",&id)){ @@ -2732,7 +2783,7 @@ ndb_mgm_get_session_id(NdbMgmHandle handle) const Properties *prop; prop = ndb_mgm_call(handle, reply, "get session id", &args); - CHECK_REPLY(prop, 0); + CHECK_REPLY(handle, prop, 0); 
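The handle refactor above collapses the separate read/write/connect timeouts into a single handle->timeout (default 60000 ms), settable through the new ndb_mgm_set_timeout(). Commands that legitimately run long do not inflate that default; they save, raise, and restore the field around the one ndb_mgm_call(), as the restart and backup hunks show. A sketch of that pattern using the field and macro names from this file (the 5-minute value is the one the restart path uses):

    unsigned int saved_timeout= handle->timeout;
    handle->timeout= 5*60*1000;        /* allow 5 minutes for "restart all" */
    reply= ndb_mgm_call(handle, restart_reply_v1, "restart all", &args);
    handle->timeout= saved_timeout;    /* restore before checking the reply */
    CHECK_REPLY(handle, reply, -1);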
if(!prop->get("id",&session_id)){ fprintf(handle->errstream, "Unable to get session id\n"); @@ -2769,7 +2820,7 @@ ndb_mgm_get_session(NdbMgmHandle handle, Uint64 id, const Properties *prop; prop = ndb_mgm_call(handle, reply, "get session", &args); - CHECK_REPLY(prop, 0); + CHECK_REPLY(handle, prop, 0); Uint64 r_id; int rlen= 0; diff --git a/storage/ndb/src/mgmapi/mgmapi_configuration.cpp b/storage/ndb/src/mgmapi/mgmapi_configuration.cpp index bfdc29b599f..553b079a22f 100644 --- a/storage/ndb/src/mgmapi/mgmapi_configuration.cpp +++ b/storage/ndb/src/mgmapi/mgmapi_configuration.cpp @@ -16,10 +16,7 @@ #include <ndb_types.h> #include <mgmapi.h> #include "mgmapi_configuration.hpp" -#include "../mgmsrv/ParamInfo.hpp" - -extern const ParamInfo ParamInfoArray[]; -extern const int ParamInfoNum; +#include "../mgmsrv/ConfigInfo.hpp" ndb_mgm_configuration_iterator::ndb_mgm_configuration_iterator (const ndb_mgm_configuration & conf, unsigned type_of_section) @@ -187,18 +184,19 @@ ndb_mgm_get_db_parameter_info(Uint32 paramId, struct ndb_mgm_param_info * info, return -1; } - for (int i = 0; i < ParamInfoNum; i++) { - if (paramId == ParamInfoArray[i]._paramId && strcmp(DB_TOKEN, ParamInfoArray[i]._section) == 0) { + ConfigInfo data; + for (int i = 0; i < data.m_NoOfParams; i++) { + if (paramId == data.m_ParamInfo[i]._paramId && strcmp("DB", data.m_ParamInfo[i]._section) == 0) { size_t tmp = 0; if (tmp + sizeof(info->m_id) <= *size) { - info->m_id = ParamInfoArray[i]._paramId; + info->m_id = data.m_ParamInfo[i]._paramId; tmp += sizeof(info->m_id); } if (tmp + sizeof(info->m_name) <= *size) { - info->m_name = ParamInfoArray[i]._fname; + info->m_name = data.m_ParamInfo[i]._fname; tmp += sizeof(info->m_name); } diff --git a/storage/ndb/src/mgmapi/mgmapi_internal.h b/storage/ndb/src/mgmapi/mgmapi_internal.h index d30be221dcd..192bc57afd9 100644 --- a/storage/ndb/src/mgmapi/mgmapi_internal.h +++ b/storage/ndb/src/mgmapi/mgmapi_internal.h @@ -68,6 +68,8 @@ extern "C" { */ NDB_SOCKET_TYPE ndb_mgm_convert_to_transporter(NdbMgmHandle *handle); + int ndb_mgm_disconnect_quiet(NdbMgmHandle handle); + #ifdef __cplusplus } #endif diff --git a/storage/ndb/src/mgmapi/ndb_logevent.cpp b/storage/ndb/src/mgmapi/ndb_logevent.cpp index 9963c24ce40..ed72db297ab 100644 --- a/storage/ndb/src/mgmapi/ndb_logevent.cpp +++ b/storage/ndb/src/mgmapi/ndb_logevent.cpp @@ -389,14 +389,18 @@ int ndb_logevent_get_next(const NdbLogEventHandle h, struct ndb_logevent *dst, unsigned timeout_in_milliseconds) { + if (timeout_in_milliseconds == 0) + { + int res; + while ((res = ndb_logevent_get_next(h, dst, 60000))==0); + return res; + } + SocketInputStream in(h->socket, timeout_in_milliseconds); Properties p; char buf[256]; - struct timeval start_time; - gettimeofday(&start_time, 0); - /* header */ while (1) { if (in.gets(buf,sizeof(buf)) == 0) @@ -409,24 +413,15 @@ int ndb_logevent_get_next(const NdbLogEventHandle h, // timed out return 0; } + if ( strcmp("log event reply\n", buf) == 0 ) break; if ( strcmp("<PING>\n", buf) ) ndbout_c("skipped: %s", buf); - struct timeval now; - gettimeofday(&now, 0); - unsigned elapsed_ms= (now.tv_sec-start_time.tv_sec)*1000 + - ((signed int)now.tv_usec-(signed int)start_time.tv_usec)/1000; - - if (elapsed_ms >= timeout_in_milliseconds) - { - // timed out - return 0; - } - - new (&in) SocketInputStream(h->socket, timeout_in_milliseconds-elapsed_ms); + if(in.timedout()) + return 0; } /* read name-value pairs into properties object */ @@ -437,11 +432,9 @@ int ndb_logevent_get_next(const NdbLogEventHandle h, 
h->m_error= NDB_LEH_READ_ERROR; return -1; } - if ( buf[0] == 0 ) - { - // timed out + if (in.timedout()) return 0; - } + if ( buf[0] == '\n' ) { break; diff --git a/storage/ndb/src/mgmclient/CommandInterpreter.cpp b/storage/ndb/src/mgmclient/CommandInterpreter.cpp index bb9ef764109..9e8910c9649 100644 --- a/storage/ndb/src/mgmclient/CommandInterpreter.cpp +++ b/storage/ndb/src/mgmclient/CommandInterpreter.cpp @@ -18,6 +18,7 @@ #include <Vector.hpp> #include <mgmapi.h> #include <util/BaseString.hpp> +#include <ndbd_exit_codes.h> class MgmtSrvr; @@ -268,8 +269,8 @@ static const char* helpText = "CLUSTERLOG TOGGLE [<severity>] ... Toggle severity filter on/off\n" "CLUSTERLOG INFO Print cluster log information\n" "<id> START Start data node (started with -n)\n" -"<id> RESTART [-n] [-i] Restart data or management server node\n" -"<id> STOP Stop data or management server node\n" +"<id> RESTART [-n] [-i] [-a] Restart data or management server node\n" +"<id> STOP [-a] Stop data or management server node\n" "ENTER SINGLE USER MODE <id> Enter single user mode\n" "EXIT SINGLE USER MODE Exit single user mode\n" "<id> STATUS Print status\n" @@ -433,7 +434,7 @@ static const char* helpTextRestart = " NDB Cluster -- Management Client -- Help for RESTART command\n" "---------------------------------------------------------------------------\n" "RESTART Restart data or management server node\n\n" -"<id> RESTART [-n] [-i] \n" +"<id> RESTART [-n] [-i] [-a]\n" " Restart the data or management node <id>(or All data nodes).\n\n" " -n (--nostart) restarts the node but does not\n" " make it join the cluster. Use '<id> START' to\n" @@ -444,6 +445,7 @@ static const char* helpTextRestart = " in the same node group during start up.\n\n" " Consult the documentation before using -i.\n\n" " INCORRECT USE OF -i WILL CAUSE DATA LOSS!\n" +" -a Aborts the node, not syncing GCP.\n" ; static const char* helpTextStop = @@ -451,10 +453,11 @@ static const char* helpTextStop = " NDB Cluster -- Management Client -- Help for STOP command\n" "---------------------------------------------------------------------------\n" "STOP Stop data or management server node\n\n" -"<id> STOP Stop the data or management server node <id>.\n\n" +"<id> STOP [-a] Stop the data or management server node <id>.\n\n" " ALL STOP will just stop all data nodes.\n\n" " If you desire to also shut down management servers,\n" -" use SHUTDOWN instead.\n" +" use SHUTDOWN instead.\n" +" -a Aborts the node, not syncing GCP.\n" ; static const char* helpTextEnterSingleUserMode = @@ -704,6 +707,133 @@ CommandInterpreter::printError() } } +/* + * print log event from mgmsrv to console screen + */ +#define make_uint64(a,b) (((Uint64)(a)) + (((Uint64)(b)) << 32)) +#define Q64(a) make_uint64(event->EVENT.a ## _lo, event->EVENT.a ## _hi) +#define R event->source_nodeid +#define Q(a) event->EVENT.a +#define QVERSION getMajor(Q(version)), getMinor(Q(version)), getBuild(Q(version)) +#define NDB_LE_(a) NDB_LE_ ## a +static void +printLogEvent(struct ndb_logevent* event) +{ + switch (event->type) { + /** + * NDB_MGM_EVENT_CATEGORY_BACKUP + */ +#undef EVENT +#define EVENT BackupStarted + case NDB_LE_BackupStarted: + ndbout_c("Node %u: Backup %d started from node %d", + R, Q(backup_id), Q(starting_node)); + break; +#undef EVENT +#define EVENT BackupFailedToStart + case NDB_LE_BackupFailedToStart: + ndbout_c("Node %u: Backup request from %d failed to start. 
Error: %d", + R, Q(starting_node), Q(error)); + break; +#undef EVENT +#define EVENT BackupCompleted + case NDB_LE_BackupCompleted: + ndbout_c("Node %u: Backup %u started from node %u completed\n" + " StartGCP: %u StopGCP: %u\n" + " #Records: %u #LogRecords: %u\n" + " Data: %u bytes Log: %u bytes", R, + Q(backup_id), Q(starting_node), + Q(start_gci), Q(stop_gci), + Q(n_records), Q(n_log_records), + Q(n_bytes), Q(n_log_bytes)); + break; +#undef EVENT +#define EVENT BackupAborted + case NDB_LE_BackupAborted: + ndbout_c("Node %u: Backup %d started from %d has been aborted. Error: %d", + R, Q(backup_id), Q(starting_node), Q(error)); + break; + /** + * NDB_MGM_EVENT_CATEGORY_STARTUP + */ +#undef EVENT +#define EVENT NDBStartStarted + case NDB_LE_NDBStartStarted: + ndbout_c("Node %u: Start initiated (version %d.%d.%d)", + R, QVERSION); + break; +#undef EVENT +#define EVENT NDBStartCompleted + case NDB_LE_NDBStartCompleted: + ndbout_c("Node %u: Started (version %d.%d.%d)", + R, QVERSION); + break; +#undef EVENT +#define EVENT NDBStopStarted + case NDB_LE_NDBStopStarted: + ndbout_c("Node %u: %s shutdown initiated", R, + (Q(stoptype) == 1 ? "Cluster" : "Node")); + break; +#undef EVENT +#define EVENT NDBStopCompleted + case NDB_LE_NDBStopCompleted: + { + BaseString action_str(""); + BaseString signum_str(""); + getRestartAction(Q(action), action_str); + if (Q(signum)) + signum_str.appfmt(" Initiated by signal %d.", + Q(signum)); + ndbout_c("Node %u: Node shutdown completed%s.%s", + R, action_str.c_str(), signum_str.c_str()); + } + break; +#undef EVENT +#define EVENT NDBStopForced + case NDB_LE_NDBStopForced: + { + BaseString action_str(""); + BaseString reason_str(""); + BaseString sphase_str(""); + int signum = Q(signum); + int error = Q(error); + int sphase = Q(sphase); + int extra = Q(extra); + getRestartAction(Q(action), action_str); + if (signum) + reason_str.appfmt(" Initiated by signal %d.", signum); + if (error) + { + ndbd_exit_classification cl; + ndbd_exit_status st; + const char *msg = ndbd_exit_message(error, &cl); + const char *cl_msg = ndbd_exit_classification_message(cl, &st); + const char *st_msg = ndbd_exit_status_message(st); + reason_str.appfmt(" Caused by error %d: \'%s(%s). 
%s\'.", + error, msg, cl_msg, st_msg); + if (extra != 0) + reason_str.appfmt(" (extra info %d)", extra); + } + if (sphase < 255) + sphase_str.appfmt(" Occured during startphase %u.", sphase); + ndbout_c("Node %u: Forced node shutdown completed%s.%s%s", + R, action_str.c_str(), sphase_str.c_str(), + reason_str.c_str()); + } + break; +#undef EVENT +#define EVENT StopAborted + case NDB_LE_NDBStopAborted: + ndbout_c("Node %u: Node shutdown aborted", R); + break; + /** + * default nothing to print + */ + default: + break; + } +} + //***************************************************************************** //***************************************************************************** @@ -720,27 +850,21 @@ event_thread_run(void* p) int filter[] = { 15, NDB_MGM_EVENT_CATEGORY_BACKUP, 1, NDB_MGM_EVENT_CATEGORY_STARTUP, 0 }; - int fd = ndb_mgm_listen_event(handle, filter); - if (fd != NDB_INVALID_SOCKET) + + NdbLogEventHandle log_handle= NULL; + struct ndb_logevent log_event; + + log_handle= ndb_mgm_create_logevent_handle(handle, filter); + if (log_handle) { do_event_thread= 1; - char *tmp= 0; - char buf[1024]; - SocketInputStream in(fd,10); do { - if (tmp == 0) NdbSleep_MilliSleep(10); - if((tmp = in.gets(buf, 1024))) - { - const char ping_token[]= "<PING>"; - if (memcmp(ping_token,tmp,sizeof(ping_token)-1)) - if(tmp && strlen(tmp)) - { - Guard g(printmutex); - ndbout << tmp; - } - } + if (ndb_logevent_get_next(log_handle, &log_event, 2000) <= 0) + continue; + Guard g(printmutex); + printLogEvent(&log_event); } while(do_event_thread); - NDB_CLOSE_SOCKET(fd); + ndb_mgm_destroy_logevent_handle(&log_handle); } else { @@ -1005,6 +1129,7 @@ CommandInterpreter::execute_impl(const char *_line, bool interactive) } else if(strcasecmp(firstToken, "ENTER") == 0 && allAfterFirstToken != NULL && + allAfterFirstToken != NULL && strncasecmp(allAfterFirstToken, "SINGLE USER MODE ", sizeof("SINGLE USER MODE") - 1) == 0){ m_error = executeEnterSingleUser(allAfterFirstToken); @@ -1478,7 +1603,6 @@ CommandInterpreter::executePurge(char* parameters) return -1; } - int i; char *str; if (ndb_mgm_purge_stale_sessions(m_mgmsrv, &str)) { @@ -1596,7 +1720,6 @@ CommandInterpreter::executeConnect(char* parameters, bool interactive) { BaseString *basestring = NULL; - int retval; disconnect(); if (!emptyString(parameters)) { basestring= new BaseString(parameters); @@ -1965,6 +2088,9 @@ CommandInterpreter::executeRestart(Vector<BaseString> &command_list, return -1; } + if (nostart) + ndbout_c("Shutting down nodes with \"-n, no start\" option, to subsequently start the nodes."); + result= ndb_mgm_restart3(m_mgmsrv, no_of_nodes, node_ids, initialstart, nostart, abort, &need_disconnect); @@ -2039,7 +2165,6 @@ CommandInterpreter::executeStatus(int processId, ndb_mgm_node_status status; Uint32 startPhase, version; - bool system; struct ndb_mgm_cluster_state *cl; cl = ndb_mgm_get_status(m_mgmsrv); @@ -2057,6 +2182,19 @@ CommandInterpreter::executeStatus(int processId, ndbout << processId << ": Node not found" << endl; return -1; } + if (cl->node_states[i].node_type != NDB_MGM_NODE_TYPE_NDB){ + if (cl->node_states[i].version != 0){ + version = cl->node_states[i].version; + ndbout << "Node "<< cl->node_states[i].node_id <<": connected" ; + ndbout_c(" (Version %d.%d.%d)", + getMajor(version) , + getMinor(version), + getBuild(version)); + + }else + ndbout << "Node "<< cl->node_states[i].node_id <<": not connected" << endl; + return 0; + } status = cl->node_states[i].node_status; startPhase = cl->node_states[i].start_phase; version = 
cl->node_states[i].version; @@ -2460,8 +2598,7 @@ CommandInterpreter::executeStartBackup(char* parameters, bool interactive) { struct ndb_mgm_reply reply; unsigned int backupId; - int fd = -1; - + Vector<BaseString> args; { BaseString(parameters).split(args); @@ -2478,7 +2615,6 @@ CommandInterpreter::executeStartBackup(char* parameters, bool interactive) if (sz == 2 && args[1] == "NOWAIT") { flags = 0; - result = ndb_mgm_start_backup(m_mgmsrv, 0, &backupId, &reply); } else if (sz == 1 || (sz == 3 && args[1] == "WAIT" && args[2] == "COMPLETED")) { @@ -2496,18 +2632,17 @@ CommandInterpreter::executeStartBackup(char* parameters, bool interactive) return -1; } - /** - * If interactive...event listner is already running - */ + NdbLogEventHandle log_handle= NULL; + struct ndb_logevent log_event; if (flags == 2 && !interactive) { int filter[] = { 15, NDB_MGM_EVENT_CATEGORY_BACKUP, 0, 0 }; - fd = ndb_mgm_listen_event(m_mgmsrv, filter); - if (fd < 0) + log_handle = ndb_mgm_create_logevent_handle(m_mgmsrv, filter); + if (!log_handle) { ndbout << "Initializing start of backup failed" << endl; printError(); - return fd; + return -1; } } result = ndb_mgm_start_backup(m_mgmsrv, flags, &backupId, &reply); @@ -2516,41 +2651,55 @@ CommandInterpreter::executeStartBackup(char* parameters, bool interactive) ndbout << "Backup failed" << endl; printError(); - if (fd >= 0) - close(fd); + if (log_handle) + ndb_mgm_destroy_logevent_handle(&log_handle); return result; } - if (fd >= 0) + /** + * If interactive, event listner thread is already running + */ + if (log_handle && !interactive) { - char *tmp; - char buf[1024]; - { - SocketInputStream in(fd); - int count = 0; - do { - tmp = in.gets(buf, 1024); - if(tmp) - { - ndbout << tmp; - unsigned int id; - if(sscanf(tmp, "%*[^:]: Backup %d ", &id) == 1 && id == backupId){ - count++; - } - } - } while(count < 2); - } - - SocketInputStream in(fd, 10); + int count = 0; + int retry = 0; do { - tmp = in.gets(buf, 1024); - if(tmp && tmp[0] != 0) + if (ndb_logevent_get_next(log_handle, &log_event, 60000) > 0) + { + int print = 0; + switch (log_event.type) { + case NDB_LE_BackupStarted: + if (log_event.BackupStarted.backup_id == backupId) + print = 1; + break; + case NDB_LE_BackupCompleted: + if (log_event.BackupCompleted.backup_id == backupId) + print = 1; + break; + case NDB_LE_BackupAborted: + if (log_event.BackupAborted.backup_id == backupId) + print = 1; + break; + default: + break; + } + if (print) + { + Guard g(m_print_mutex); + printLogEvent(&log_event); + count++; + } + } + else { - ndbout << tmp; + retry++; } - } while(tmp && tmp[0] != 0); - - close(fd); + } while(count < 2 && retry < 3); + + if (retry >= 3) + ndbout << "get backup event failed for " << retry << " times" << endl; + + ndb_mgm_destroy_logevent_handle(&log_handle); } return 0; diff --git a/storage/ndb/src/mgmclient/Makefile.am b/storage/ndb/src/mgmclient/Makefile.am index 5b2009240c3..41f659cf68d 100644 --- a/storage/ndb/src/mgmclient/Makefile.am +++ b/storage/ndb/src/mgmclient/Makefile.am @@ -21,7 +21,8 @@ libndbmgmclient_la_LIBADD = ../mgmapi/libmgmapi.la \ ../common/logger/liblogger.la \ ../common/portlib/libportlib.la \ ../common/util/libgeneral.la \ - ../common/portlib/libportlib.la + ../common/portlib/libportlib.la \ + ../common/debugger/libtrace.la ndb_mgm_SOURCES = main.cpp @@ -35,6 +36,7 @@ INCLUDES += -I$(top_srcdir)/storage/ndb/include/mgmapi \ LDADD_LOC = $(noinst_LTLIBRARIES) \ ../common/portlib/libportlib.la \ @readline_link@ \ + $(top_builddir)/storage/ndb/src/libndbclient.la \ 
$(top_builddir)/dbug/libdbug.a \ $(top_builddir)/mysys/libmysys.a \ $(top_builddir)/strings/libmystrings.a \ diff --git a/storage/ndb/src/mgmclient/main.cpp b/storage/ndb/src/mgmclient/main.cpp index f9b093f132a..fbd81c71700 100644 --- a/storage/ndb/src/mgmclient/main.cpp +++ b/storage/ndb/src/mgmclient/main.cpp @@ -23,6 +23,8 @@ extern "C" { #elif !defined(__NETWARE__) #include <readline/readline.h> extern "C" int add_history(const char *command); /* From readline directory */ +extern "C" int read_history(const char *command); +extern "C" int write_history(const char *command); #define HAVE_READLINE #endif } @@ -71,11 +73,11 @@ static struct my_option my_long_options[] = NDB_STD_OPTS("ndb_mgm"), { "execute", 'e', "execute command and exit", - (gptr*) &opt_execute_str, (gptr*) &opt_execute_str, 0, + (uchar**) &opt_execute_str, (uchar**) &opt_execute_str, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { "try-reconnect", 't', "Specify number of tries for connecting to ndb_mgmd (0 = infinite)", - (gptr*) &_try_reconnect, (gptr*) &_try_reconnect, 0, + (uchar**) &_try_reconnect, (uchar**) &_try_reconnect, 0, GET_UINT, REQUIRED_ARG, 3, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} }; @@ -128,8 +130,6 @@ read_and_execute(int _try_reconnect) int main(int argc, char** argv){ NDB_INIT(argv[0]); - const char *_host = 0; - int _port = 0; load_defaults("my",load_default_groups,&argc,&argv); int ho_error; @@ -157,10 +157,35 @@ int main(int argc, char** argv){ signal(SIGPIPE, handler); com = new Ndb_mgmclient(opt_connect_str,1); int ret= 0; + BaseString histfile; if (!opt_execute_str) { +#ifdef HAVE_READLINE + char *histfile_env= getenv("NDB_MGM_HISTFILE"); + if (histfile_env) + histfile.assign(histfile_env,strlen(histfile_env)); + else if(getenv("HOME")) + { + histfile.assign(getenv("HOME"),strlen(getenv("HOME"))); + histfile.append("/.ndb_mgm_history"); + } + if (histfile.length()) + read_history(histfile.c_str()); +#endif + ndbout << "-- NDB Cluster -- Management Client --" << endl; while(read_and_execute(_try_reconnect)); + +#ifdef HAVE_READLINE + if (histfile.length()) + { + BaseString histfile_tmp; + histfile_tmp.assign(histfile); + histfile_tmp.append(".TMP"); + if(!write_history(histfile_tmp.c_str())) + my_rename(histfile_tmp.c_str(), histfile.c_str(), MYF(MY_WME)); + } +#endif } else { diff --git a/storage/ndb/src/mgmsrv/ConfigInfo.cpp b/storage/ndb/src/mgmsrv/ConfigInfo.cpp index 229824c49bf..9cbb7d93ceb 100644 --- a/storage/ndb/src/mgmsrv/ConfigInfo.cpp +++ b/storage/ndb/src/mgmsrv/ConfigInfo.cpp @@ -14,6 +14,7 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <ndb_global.h> +#ifndef NDB_MGMAPI #include <ndb_opt_defaults.h> #include <NdbTCP.h> @@ -26,6 +27,11 @@ extern my_bool opt_ndb_shm; extern my_bool opt_core; +#else +#include "ConfigInfo.hpp" +#include <mgmapi_config_parameters.h> +#endif /* NDB_MGMAPI */ + #define MAX_LINE_LENGTH 255 #define KEY_INTERNAL 0 #define MAX_INT_RNIL 0xfffffeff @@ -46,6 +52,7 @@ extern my_bool opt_core; #define MGM_TOKEN "MGM" #define API_TOKEN "API" +#ifndef NDB_MGMAPI const ConfigInfo::AliasPair ConfigInfo::m_sectionNameAliases[]={ {API_TOKEN, "MYSQLD"}, @@ -218,6 +225,7 @@ const DepricationTransform f_deprication[] = { { MGM_TOKEN, "Id", "NodeId", 0, 1 }, { 0, 0, 0, 0, 0} }; +#endif /* NDB_MGMAPI */ /** * The default constructors create objects with suitable values for the @@ -449,7 +457,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { ConfigInfo::CI_INT, "128", "8", - 
STR_VALUE(MAX_INT_RNIL) }, + STR_VALUE(MAX_TABLES) }, { CFG_DB_NO_ORDERED_INDEXES, @@ -903,7 +911,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { ConfigInfo::CI_USED, false, ConfigInfo::CI_INT, - "40", + "0", "20", STR_VALUE(MAX_INT_RNIL) }, @@ -2202,7 +2210,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { const int ConfigInfo::m_NoOfParams = sizeof(m_ParamInfo) / sizeof(ParamInfo); - +#ifndef NDB_MGMAPI /**************************************************************************** * Ctor ****************************************************************************/ @@ -2309,7 +2317,6 @@ ConfigInfo::ConfigInfo() break; case CI_BOOL: { - bool tmp_bool; require(InitConfigFileParser::convertStringToBool(param._default, default_bool)); require(p->put(param._fname, default_bool)); break; @@ -2317,7 +2324,6 @@ ConfigInfo::ConfigInfo() case CI_INT: case CI_INT64: { - Uint64 tmp_uint64; require(InitConfigFileParser::convertStringToUint64(param._default, default_uint64)); require(p->put(param._fname, default_uint64)); break; @@ -2828,7 +2834,7 @@ applyDefaultValues(InitConfigFileParser::Context & ctx, Properties::Iterator it(defaults); for(const char * name = it.first(); name != NULL; name = it.next()){ - ConfigInfo::Status st = ctx.m_info->getStatus(ctx.m_currentInfo, name); + (void) ctx.m_info->getStatus(ctx.m_currentInfo, name); if(!ctx.m_currentSection->contains(name)){ switch (ctx.m_info->getType(ctx.m_currentInfo, name)){ case ConfigInfo::CI_INT: @@ -2939,25 +2945,50 @@ static bool fixNodeId(InitConfigFileParser::Context & ctx, const char * data) char buf[] = "NodeIdX"; buf[6] = data[sizeof("NodeI")]; char sysbuf[] = "SystemX"; sysbuf[6] = data[sizeof("NodeI")]; const char* nodeId; - require(ctx.m_currentSection->get(buf, &nodeId)); + if(!ctx.m_currentSection->get(buf, &nodeId)) + { + ctx.reportError("Mandatory parameter %s missing from section" + "[%s] starting at line: %d", + buf, ctx.fname, ctx.m_sectionLineno); + return false; + } char tmpLine[MAX_LINE_LENGTH]; strncpy(tmpLine, nodeId, MAX_LINE_LENGTH); char* token1 = strtok(tmpLine, "."); char* token2 = strtok(NULL, "."); Uint32 id; - + + if(!token1) + { + ctx.reportError("Value for mandatory parameter %s missing from section " + "[%s] starting at line: %d", + buf, ctx.fname, ctx.m_sectionLineno); + return false; + } if (token2 == NULL) { // Only a number given errno = 0; char* p; id = strtol(token1, &p, 10); - if (errno != 0) warning("STRTOK1", nodeId); + if (errno != 0 || id <= 0x0 || id > MAX_NODES) + { + ctx.reportError("Illegal value for mandatory parameter %s from section " + "[%s] starting at line: %d", + buf, ctx.fname, ctx.m_sectionLineno); + return false; + } require(ctx.m_currentSection->put(buf, id, true)); } else { // A pair given (e.g. 
"uppsala.32") errno = 0; char* p; id = strtol(token2, &p, 10); - if (errno != 0) warning("STRTOK2", nodeId); + if (errno != 0 || id <= 0x0 || id > MAX_NODES) + { + ctx.reportError("Illegal value for mandatory parameter %s from section " + "[%s] starting at line: %d", + buf, ctx.fname, ctx.m_sectionLineno); + return false; + } require(ctx.m_currentSection->put(buf, id, true)); require(ctx.m_currentSection->put(sysbuf, token1)); } @@ -3459,7 +3490,7 @@ saveInConfigValues(InitConfigFileParser::Context & ctx, const char * data){ if(!ctx.m_currentInfo->get(n, &info)) continue; - Uint32 id = 0; + id = 0; info->get("Id", &id); if(id == KEY_INTERNAL) @@ -3663,6 +3694,7 @@ check_node_vs_replicas(Vector<ConfigInfo::ConfigRuleSection>§ions, Uint32 db_nodes= 0; Uint32 replicas= 0; Uint32 db_host_count= 0; + bool with_arbitration_rank= false; ctx.m_userProperties.get(DB_TOKEN, &db_nodes); ctx.m_userProperties.get("NoOfReplicas", &replicas); if((db_nodes % replicas) != 0){ @@ -3698,85 +3730,92 @@ check_node_vs_replicas(Vector<ConfigInfo::ConfigRuleSection>§ions, tmp->get("HostName", &host); if (strcmp(type,DB_TOKEN) == 0) - { - { - Uint32 ii; - if (!p_db_hosts.get(host,&ii)) - db_host_count++; - p_db_hosts.put(host,i); - if (p_arbitrators.get(host,&ii)) - { - arbitration_warning.appfmt(arbit_warn_fmt, ii, i, host); - p_arbitrators.remove(host); // only one warning per db node - } - } - { - unsigned j; - BaseString str, str2; - str.assfmt("#group%d_",group); - p_db_hosts.put(str.c_str(),i_group,host); - str2.assfmt("##group%d_",group); - p_db_hosts.put(str2.c_str(),i_group,i); - for (j= 0; j < i_group; j++) - { - const char *other_host; - p_db_hosts.get(str.c_str(),j,&other_host); - if (strcmp(host,other_host) == 0) { - unsigned int other_i, c= 0; - p_db_hosts.get(str2.c_str(),j,&other_i); - p_db_hosts.get(str.c_str(),&c); - if (c == 0) // first warning in this node group - node_group_warning.appfmt(" Node group %d", group); - c|= 1 << j; - p_db_hosts.put(str.c_str(),c); - - node_group_warning.appfmt(",\n db node with id %d and id %d " - "on same host %s", other_i, i, host); - } - } - i_group++; - DBUG_ASSERT(i_group <= replicas); - if (i_group == replicas) - { - unsigned c= 0; - p_db_hosts.get(str.c_str(),&c); - if (c+1 == (1u << (replicas-1))) // all nodes on same machine - node_group_warning.append(".\n Host failure will " - "cause complete cluster shutdown."); - else if (c > 0) - node_group_warning.append(".\n Host failure may " - "cause complete cluster shutdown."); - group++; - i_group= 0; - } - } + { + { + Uint32 ii; + if (!p_db_hosts.get(host,&ii)) + db_host_count++; + p_db_hosts.put(host,i); + if (p_arbitrators.get(host,&ii)) + { + arbitration_warning.appfmt(arbit_warn_fmt, ii, i, host); + p_arbitrators.remove(host); // only one warning per db node + } + } + { + unsigned j; + BaseString str, str2; + str.assfmt("#group%d_",group); + p_db_hosts.put(str.c_str(),i_group,host); + str2.assfmt("##group%d_",group); + p_db_hosts.put(str2.c_str(),i_group,i); + for (j= 0; j < i_group; j++) + { + const char *other_host; + p_db_hosts.get(str.c_str(),j,&other_host); + if (strcmp(host,other_host) == 0) { + unsigned int other_i, c= 0; + p_db_hosts.get(str2.c_str(),j,&other_i); + p_db_hosts.get(str.c_str(),&c); + if (c == 0) // first warning in this node group + node_group_warning.appfmt(" Node group %d", group); + c|= 1 << j; + p_db_hosts.put(str.c_str(),c); + node_group_warning.appfmt(",\n db node with id %d and id %d " + "on same host %s", other_i, i, host); + } + } + i_group++; + DBUG_ASSERT(i_group <= 
replicas); + if (i_group == replicas) + { + unsigned c= 0; + p_db_hosts.get(str.c_str(),&c); + if (c+1 == (1u << (replicas-1))) // all nodes on same machine + node_group_warning.append(".\n Host failure will " + "cause complete cluster shutdown."); + else if (c > 0) + node_group_warning.append(".\n Host failure may " + "cause complete cluster shutdown."); + group++; + i_group= 0; + } + } } else if (strcmp(type,API_TOKEN) == 0 || strcmp(type,MGM_TOKEN) == 0) - { - Uint32 rank; - if(tmp->get("ArbitrationRank", &rank) && rank > 0) - { - if(host && host[0] != 0) - { - Uint32 ii; - p_arbitrators.put(host,i); - if (p_db_hosts.get(host,&ii)) - { - arbitration_warning.appfmt(arbit_warn_fmt, i, ii, host); - } - } - else - { - arbitration_warning.appfmt(arbit_warn_fmt2, i); - } - } + { + Uint32 rank; + if(tmp->get("ArbitrationRank", &rank) && rank > 0) + { + with_arbitration_rank = true; // note whether any MGM or API node is configured with rank > 0 + if(host && host[0] != 0) + { + Uint32 ii; + p_arbitrators.put(host,i); + if (p_db_hosts.get(host,&ii)) + { + arbitration_warning.appfmt(arbit_warn_fmt, i, ii, host); + } + } + else + { + arbitration_warning.appfmt(arbit_warn_fmt2, i); + } + } } } if (db_host_count > 1 && node_group_warning.length() > 0) - ndbout_c("Cluster configuration warning:\n%s",node_group_warning.c_str()); + ctx.reportWarning("Cluster configuration warning:\n%s",node_group_warning.c_str()); + if (!with_arbitration_rank) + { + ctx.reportWarning("Cluster configuration warning:" + "\n Neither %s nor %s nodes are configured with an arbitrator," + "\n which may cause complete cluster shutdown in case of host failure.", + MGM_TOKEN, API_TOKEN); + } if (db_host_count > 1 && arbitration_warning.length() > 0) - ndbout_c("Cluster configuration warning:%s%s",arbitration_warning.c_str(), + ctx.reportWarning("Cluster configuration warning:%s%s",arbitration_warning.c_str(), "\n Running arbitrator on the same host as a database node may" "\n cause complete cluster shutdown in case of host failure."); } @@ -3784,3 +3823,4 @@ check_node_vs_replicas(Vector<ConfigInfo::ConfigRuleSection>&sections, } template class Vector<ConfigInfo::ConfigRuleSection>; +#endif /* NDB_MGMAPI */ diff --git a/storage/ndb/src/mgmsrv/ConfigInfo.hpp b/storage/ndb/src/mgmsrv/ConfigInfo.hpp index 08b12522807..94543dbdc5e 100644 --- a/storage/ndb/src/mgmsrv/ConfigInfo.hpp +++ b/storage/ndb/src/mgmsrv/ConfigInfo.hpp @@ -16,18 +16,23 @@ #ifndef ConfigInfo_H #define ConfigInfo_H +#ifndef NDB_MGMAPI #include <kernel_types.h> #include <Properties.hpp> #include <ndb_limits.h> #include <NdbOut.hpp> #include "InitConfigFileParser.hpp" +#endif /* NDB_MGMAPI */ /** * A MANDATORY parameter must be specified in the config file * An UNDEFINED parameter may or may not be specified in the config file */ -static const char* MANDATORY = (char*)~(UintPtr)0;// Default value for mandatory params. -static const char* UNDEFINED = 0; // Default value for undefined params. + +// Default value for mandatory params. +#define MANDATORY ((char*)~(UintPtr)0)
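The macro form matters here because ConfigInfo.hpp is now also consumed by the stripped-down NDB_MGMAPI build, where a header-scope static pointer would emit a private copy into every translation unit that includes it. A minimal sketch (not part of the patch) of the kind of comparison these sentinels support when walking the parameter table; ConfigInfo::m_ParamInfo, m_NoOfParams and the _default field are the ones declared in this header:

  for (int i = 0; i < ConfigInfo::m_NoOfParams; i++) {
    const ConfigInfo::ParamInfo &param = ConfigInfo::m_ParamInfo[i];
    if (param._default == MANDATORY) {
      // no default exists: the config file must supply this value
    } else if (param._default == UNDEFINED) {
      // optional parameter: absence is acceptable, nothing is stored
    } else {
      // param._default holds the textual default, e.g. "128" or "8M"
    }
  }

+// Default value for undefined params. 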
+#define UNDEFINED ((char*) 0) /** * @class ConfigInfo @@ -60,6 +65,7 @@ public: const char* _max; }; +#ifndef NDB_MGMAPI struct AliasPair{ const char * name; const char * alias; @@ -129,14 +135,17 @@ private: static const AliasPair m_sectionNameAliases[]; static const char* m_sectionNames[]; static const int m_noOfSectionNames; +#endif /* NDB_MGMAPI */ public: static const ParamInfo m_ParamInfo[]; static const int m_NoOfParams; +#ifndef NDB_MGMAPI static const SectionRule m_SectionRules[]; static const ConfigRule m_ConfigRules[]; static const int m_NoOfRules; +#endif /* NDB_MGMAPI */ }; #endif // ConfigInfo_H diff --git a/storage/ndb/src/mgmsrv/ERROR_codes.txt b/storage/ndb/src/mgmsrv/ERROR_codes.txt new file mode 100644 index 00000000000..44a6047c05e --- /dev/null +++ b/storage/ndb/src/mgmsrv/ERROR_codes.txt @@ -0,0 +1,29 @@ +Next Session 10 +Next Global 10001 + + +#define MGM_ERROR_MAX_INJECT_SESSION_ONLY 10000 +Errors < 10000 are per-session only - in MgmApiSession. + +Others are for the whole mgm server. + +Error 0 is no error + +TIMEOUTS +-------- + +num where type testing + +1 get config sleep begin +2 get config sleep middle parsable +3 get config mangle halfway through encoded properties + +4 end session sleep before reply + +5 node status sleep before reply +6 node status sleep during parsable reply +7 node status sleep after parsable, before status reply +8 node status sleep partway through status reporting +9 node status sleep end of status printing + +10000 events PING no ping don't send pings to event listeners diff --git a/storage/ndb/src/mgmsrv/InitConfigFileParser.cpp b/storage/ndb/src/mgmsrv/InitConfigFileParser.cpp index b8c4bbc590d..569cb1eb654 100644 --- a/storage/ndb/src/mgmsrv/InitConfigFileParser.cpp +++ b/storage/ndb/src/mgmsrv/InitConfigFileParser.cpp @@ -612,10 +612,11 @@ static my_bool parse_mycnf_opt(int, const struct my_option * opt, char * value) { + long *app_type= (long*) &opt->app_type; if(opt->comment) - ((struct my_option *)opt)->app_type++; + (*app_type)++; else - ((struct my_option *)opt)->app_type = order++; + *app_type = order++; return 0; } @@ -701,34 +702,35 @@ load_defaults(Vector<struct my_option>& options, const char* groups[]) BaseString extra_file; BaseString group_suffix; - const char *save_file = defaults_file; - char *save_extra_file = defaults_extra_file; - const char *save_group_suffix = defaults_group_suffix; + const char *save_file = my_defaults_file; + char *save_extra_file = my_defaults_extra_file; + const char *save_group_suffix = my_defaults_group_suffix; - if (defaults_file) + if (my_defaults_file) { - file.assfmt("--defaults-file=%s", defaults_file); + file.assfmt("--defaults-file=%s", my_defaults_file); argv[argc++] = file.c_str(); } - if (defaults_extra_file) + if (my_defaults_extra_file) { - extra_file.assfmt("--defaults-extra-file=%s", defaults_extra_file); + extra_file.assfmt("--defaults-extra-file=%s", my_defaults_extra_file); argv[argc++] = extra_file.c_str(); } - if (defaults_group_suffix) + if (my_defaults_group_suffix) { - group_suffix.assfmt("--defaults-group-suffix=%s", defaults_group_suffix); + group_suffix.assfmt("--defaults-group-suffix=%s", + my_defaults_group_suffix); argv[argc++] = group_suffix.c_str(); } char ** tmp = (char**)argv; int ret = load_defaults("my", groups, &argc, &tmp); - defaults_file = save_file; - defaults_extra_file = save_extra_file; - defaults_group_suffix = save_group_suffix; + my_defaults_file = save_file; + my_defaults_extra_file = save_extra_file; + my_defaults_group_suffix = 
save_group_suffix; if (ret == 0) { @@ -779,19 +781,19 @@ InitConfigFileParser::parse_mycnf() const ConfigInfo::ParamInfo& param = ConfigInfo::m_ParamInfo[i]; switch(param._type){ case ConfigInfo::CI_BOOL: - opt.value = (gptr*)malloc(sizeof(int)); + opt.value = (uchar **)malloc(sizeof(int)); opt.var_type = GET_INT; break; case ConfigInfo::CI_INT: - opt.value = (gptr*)malloc(sizeof(int)); + opt.value = (uchar**)malloc(sizeof(int)); opt.var_type = GET_INT; break; case ConfigInfo::CI_INT64: - opt.value = (gptr*)malloc(sizeof(Int64)); + opt.value = (uchar**)malloc(sizeof(Int64)); opt.var_type = GET_LL; break; case ConfigInfo::CI_STRING: - opt.value = (gptr*)malloc(sizeof(char *)); + opt.value = (uchar**)malloc(sizeof(char *)); opt.var_type = GET_STR; break; default: @@ -817,28 +819,28 @@ InitConfigFileParser::parse_mycnf() bzero(&opt, sizeof(opt)); opt.name = "ndbd"; opt.id = 256; - opt.value = (gptr*)malloc(sizeof(char*)); + opt.value = (uchar**)malloc(sizeof(char*)); opt.var_type = GET_STR; opt.arg_type = REQUIRED_ARG; options.push_back(opt); opt.name = "ndb_mgmd"; opt.id = 256; - opt.value = (gptr*)malloc(sizeof(char*)); + opt.value = (uchar**)malloc(sizeof(char*)); opt.var_type = GET_STR; opt.arg_type = REQUIRED_ARG; options.push_back(opt); opt.name = "mysqld"; opt.id = 256; - opt.value = (gptr*)malloc(sizeof(char*)); + opt.value = (uchar**)malloc(sizeof(char*)); opt.var_type = GET_STR; opt.arg_type = REQUIRED_ARG; options.push_back(opt); opt.name = "ndbapi"; opt.id = 256; - opt.value = (gptr*)malloc(sizeof(char*)); + opt.value = (uchar**)malloc(sizeof(char*)); opt.var_type = GET_STR; opt.arg_type = REQUIRED_ARG; options.push_back(opt); @@ -947,22 +949,6 @@ end: template class Vector<struct my_option>; -#if 0 -struct my_option -{ - const char *name; /* Name of the option */ - int id; /* unique id or short option */ - const char *comment; /* option comment, for autom. --help */ - gptr *value; /* The variable value */ - gptr *u_max_value; /* The user def. max variable value */ - const char **str_values; /* Pointer to possible values */ - ulong var_type; - enum get_opt_arg_type arg_type; - longlong def_value; /* Default value */ - longlong min_value; /* Min allowed value */ - longlong max_value; /* Max allowed value */ - longlong sub_size; /* Subtract this from given value */ - long block_size; /* Value should be a mult. 
of this */ - int app_type; /* To be used by an application */ -}; -#endif +/* + See include/my_getopt.h for the declaration of struct my_option +*/ diff --git a/storage/ndb/src/mgmsrv/Makefile.am b/storage/ndb/src/mgmsrv/Makefile.am index adde2ad5d34..c19f885ae8d 100644 --- a/storage/ndb/src/mgmsrv/Makefile.am +++ b/storage/ndb/src/mgmsrv/Makefile.am @@ -38,7 +38,7 @@ INCLUDES_LOC = -I$(top_srcdir)/storage/ndb/src/ndbapi \ -I$(top_srcdir)/storage/ndb/src/common/mgmcommon \ -I$(top_srcdir)/storage/ndb/src/mgmclient -LDADD_LOC = $(top_builddir)/storage/ndb/src/mgmclient/CommandInterpreter.o \ +LDADD_LOC = $(top_builddir)/storage/ndb/src/mgmclient/CommandInterpreter.lo \ $(top_builddir)/storage/ndb/src/libndbclient.la \ $(top_builddir)/dbug/libdbug.a \ $(top_builddir)/mysys/libmysys.a \ diff --git a/storage/ndb/src/mgmsrv/MgmtSrvr.cpp b/storage/ndb/src/mgmsrv/MgmtSrvr.cpp index f377e78bb5f..ee5bb5103d8 100644 --- a/storage/ndb/src/mgmsrv/MgmtSrvr.cpp +++ b/storage/ndb/src/mgmsrv/MgmtSrvr.cpp @@ -18,6 +18,7 @@ #include "MgmtSrvr.hpp" #include "MgmtErrorReporter.hpp" +#include "ndb_mgmd_error.h" #include <ConfigRetriever.hpp> #include <NdbOut.hpp> @@ -66,6 +67,9 @@ #define DEBUG(x) #endif +int g_errorInsert; +#define ERROR_INSERTED(x) (g_errorInsert == x) + #define INIT_SIGNAL_SENDER(ss,nodeId) \ SignalSender ss(theFacade); \ ss.lock(); /* lock will be released on exit */ \ @@ -100,6 +104,7 @@ MgmtSrvr::logLevelThread_C(void* m) extern EventLogger g_eventLogger; +#ifdef NOT_USED static NdbOut& operator<<(NdbOut& out, const LogLevel & ll) { @@ -109,6 +114,7 @@ operator<<(NdbOut& out, const LogLevel & ll) out << "]"; return out; } +#endif void MgmtSrvr::logLevelThreadRun() @@ -175,6 +181,10 @@ MgmtSrvr::logLevelThreadRun() m_log_level_requests.lock(); } m_log_level_requests.unlock(); + + if(!ERROR_INSERTED(10000)) + m_event_listner.check_listeners(); + NdbSleep_MilliSleep(_logLevelThreadSleep); } } @@ -224,19 +234,12 @@ MgmtSrvr::startEventLog() } } -void -MgmtSrvr::stopEventLog() +void +MgmtSrvr::stopEventLog() { - // Nothing yet + g_eventLogger.close(); } -class ErrorItem -{ -public: - int _errorCode; - const char * _errorText; -}; - bool MgmtSrvr::setEventLogFilter(int severity, int enable) { @@ -259,62 +262,6 @@ MgmtSrvr::isEventLogFilterEnabled(int severity) return g_eventLogger.isEnable((Logger::LoggerLevel)severity); } -static ErrorItem errorTable[] = -{ - {MgmtSrvr::NO_CONTACT_WITH_PROCESS, "No contact with the process (dead ?)."}, - {MgmtSrvr::PROCESS_NOT_CONFIGURED, "The process is not configured."}, - {MgmtSrvr::WRONG_PROCESS_TYPE, - "The process has wrong type. Expected a DB process."}, - {MgmtSrvr::COULD_NOT_ALLOCATE_MEMORY, "Could not allocate memory."}, - {MgmtSrvr::SEND_OR_RECEIVE_FAILED, "Send to process or receive failed."}, - {MgmtSrvr::INVALID_LEVEL, "Invalid level. Should be between 1 and 30."}, - {MgmtSrvr::INVALID_ERROR_NUMBER, "Invalid error number. Should be >= 0."}, - {MgmtSrvr::INVALID_TRACE_NUMBER, "Invalid trace number."}, - {MgmtSrvr::NOT_IMPLEMENTED, "Not implemented."}, - {MgmtSrvr::INVALID_BLOCK_NAME, "Invalid block name"}, - - {MgmtSrvr::CONFIG_PARAM_NOT_EXIST, - "The configuration parameter does not exist for the process type."}, - {MgmtSrvr::CONFIG_PARAM_NOT_UPDATEABLE, - "The configuration parameter is not possible to update."}, - {MgmtSrvr::VALUE_WRONG_FORMAT_INT_EXPECTED, - "Incorrect value. 
Expected integer."}, - {MgmtSrvr::VALUE_TOO_LOW, "Value is too low."}, - {MgmtSrvr::VALUE_TOO_HIGH, "Value is too high."}, - {MgmtSrvr::VALUE_WRONG_FORMAT_BOOL_EXPECTED, - "Incorrect value. Expected TRUE or FALSE."}, - - {MgmtSrvr::CONFIG_FILE_OPEN_WRITE_ERROR, - "Could not open configuration file for writing."}, - {MgmtSrvr::CONFIG_FILE_OPEN_READ_ERROR, - "Could not open configuration file for reading."}, - {MgmtSrvr::CONFIG_FILE_WRITE_ERROR, - "Write error when writing configuration file."}, - {MgmtSrvr::CONFIG_FILE_READ_ERROR, - "Read error when reading configuration file."}, - {MgmtSrvr::CONFIG_FILE_CLOSE_ERROR, "Could not close configuration file."}, - - {MgmtSrvr::CONFIG_CHANGE_REFUSED_BY_RECEIVER, - "The change was refused by the receiving process."}, - {MgmtSrvr::COULD_NOT_SYNC_CONFIG_CHANGE_AGAINST_PHYSICAL_MEDIUM, - "The change could not be synced against physical medium."}, - {MgmtSrvr::CONFIG_FILE_CHECKSUM_ERROR, - "The config file is corrupt. Checksum error."}, - {MgmtSrvr::NOT_POSSIBLE_TO_SEND_CONFIG_UPDATE_TO_PROCESS_TYPE, - "It is not possible to send an update of a configuration variable " - "to this kind of process."}, - {MgmtSrvr::NODE_SHUTDOWN_IN_PROGESS, "Node shutdown in progress" }, - {MgmtSrvr::SYSTEM_SHUTDOWN_IN_PROGRESS, "System shutdown in progress" }, - {MgmtSrvr::NODE_SHUTDOWN_WOULD_CAUSE_SYSTEM_CRASH, - "Node shutdown would cause system crash" }, - {MgmtSrvr::UNSUPPORTED_NODE_SHUTDOWN, - "Unsupported multi node shutdown. Abort option required." }, - {MgmtSrvr::NODE_NOT_API_NODE, "The specified node is not an API node." }, - {MgmtSrvr::OPERATION_NOT_ALLOWED_START_STOP, - "Operation not allowed while nodes are starting or stopping."}, - {MgmtSrvr::NO_CONTACT_WITH_DB_NODES, "No contact with database nodes" } -}; - int MgmtSrvr::translateStopRef(Uint32 errCode) { switch(errCode){ @@ -334,8 +281,6 @@ int MgmtSrvr::translateStopRef(Uint32 errCode) return 4999; } -static int noOfErrorCodes = sizeof(errorTable) / sizeof(ErrorItem); - int MgmtSrvr::getNodeCount(enum ndb_mgm_node_type type) const { @@ -619,6 +564,16 @@ MgmtSrvr::start(BaseString &error_string) ndbout_c("This is probably a bug."); } + /* + set api reg req frequency quite high: + + 100 ms interval to make sure we have fairly up-to-date + info from the nodes. 
This is to make sure that this info + is not dependent on heartbeat settings in the + configuration */ + theFacade->theClusterMgr->set_max_api_reg_req_interval(100); + TransporterRegistry *reg = theFacade->get_registry(); for(unsigned int i=0;i<reg->m_transporter_interface.size();i++) { BaseString msg; @@ -1127,7 +1082,9 @@ int MgmtSrvr::sendSTOP_REQ(const Vector<NodeId> &node_ids, break; } case GSN_STOP_CONF:{ +#ifdef NOT_USED const StopConf * const ref = CAST_CONSTPTR(StopConf, signal->getDataPtr()); +#endif const NodeId nodeId = refToNode(signal->header.theSendersBlockRef); #ifdef VM_TRACE ndbout_c("Node %d single user mode", nodeId); @@ -1160,7 +1117,6 @@ int MgmtSrvr::sendSTOP_REQ(const Vector<NodeId> &node_ids, const NodeFailRep * const rep = CAST_CONSTPTR(NodeFailRep, signal->getDataPtr()); NdbNodeBitmask mask; - char buf[100]; mask.assign(NdbNodeBitmask::Size, rep->theNodes); mask.bitAND(notstarted); nodes.bitANDC(mask); @@ -1354,7 +1310,7 @@ int MgmtSrvr::restartNodes(const Vector<NodeId> &node_ids, for (unsigned i = 0; i < node_ids.size(); i++) { - int result = start(node_ids[i]); + (void) start(node_ids[i]); } return 0; } @@ -1727,14 +1683,31 @@ MgmtSrvr::setNodeLogLevelImpl(int nodeId, const SetLogLevelOrd & ll) int MgmtSrvr::insertError(int nodeId, int errorNo) { + int block; + if (errorNo < 0) { return INVALID_ERROR_NUMBER; } - INIT_SIGNAL_SENDER(ss,nodeId); - + SignalSender ss(theFacade); + ss.lock(); /* lock will be released on exit */ + + if(getNodeType(nodeId) == NDB_MGM_NODE_TYPE_NDB) + { + block= CMVMI; + } + else if(nodeId == _ownNodeId) + { + g_errorInsert= errorNo; + return 0; + } + else if(getNodeType(nodeId) == NDB_MGM_NODE_TYPE_MGM) + block= _blockNumber; + else + return WRONG_PROCESS_TYPE; + SimpleSignal ssig; - ssig.set(ss,TestOrd::TraceAPI, CMVMI, GSN_TAMPER_ORD, + ssig.set(ss,TestOrd::TraceAPI, block, GSN_TAMPER_ORD, TamperOrd::SignalLength); TamperOrd* const tamperOrd = CAST_PTR(TamperOrd, ssig.getDataPtrSend()); tamperOrd->errorNo = errorNo; @@ -1932,18 +1905,8 @@ MgmtSrvr::dumpState(int nodeId, const Uint32 args[], Uint32 no) const char* MgmtSrvr::getErrorText(int errorCode, char *buf, int buf_sz) { - - for (int i = 0; i < noOfErrorCodes; ++i) { - if (errorCode == errorTable[i]._errorCode) { - BaseString::snprintf(buf, buf_sz, errorTable[i]._errorText); - buf[buf_sz-1]= 0; - return buf; - } - } - ndb_error_string(errorCode, buf, buf_sz); buf[buf_sz-1]= 0; - return buf; } @@ -1969,6 +1932,10 @@ MgmtSrvr::handleReceivedSignal(NdbApiSignal* signal) case GSN_NODE_FAILREP: break; + case GSN_TAMPER_ORD: + ndbout << "TAMPER ORD" << endl; + break; + default: g_eventLogger.error("Unknown signal received. 
SignalNumber: " "%i from (%d, %x)", @@ -2118,8 +2085,10 @@ MgmtSrvr::alloc_node_id_req(NodeId free_node_id, enum ndb_mgm_node_type type) switch (gsn) { case GSN_ALLOC_NODEID_CONF: { +#ifdef NOT_USED const AllocNodeIdConf * const conf = CAST_CONSTPTR(AllocNodeIdConf, signal->getDataPtr()); +#endif return 0; } case GSN_ALLOC_NODEID_REF: @@ -2134,6 +2103,11 @@ MgmtSrvr::alloc_node_id_req(NodeId free_node_id, enum ndb_mgm_node_type type) nodeId = refToNode(ref->masterRef); if (!theFacade->get_node_alive(nodeId)) nodeId = 0; + if (ref->errorCode != AllocNodeIdRef::NotMaster) + { + /* sleep for a while (100ms) before retrying */ + NdbSleep_MilliSleep(100); + } continue; } return ref->errorCode; diff --git a/storage/ndb/src/mgmsrv/MgmtSrvr.hpp b/storage/ndb/src/mgmsrv/MgmtSrvr.hpp index d73dd4561e9..90287554ef8 100644 --- a/storage/ndb/src/mgmsrv/MgmtSrvr.hpp +++ b/storage/ndb/src/mgmsrv/MgmtSrvr.hpp @@ -38,6 +38,10 @@ */ #define MGMSRV 1 +#define MGM_ERROR_MAX_INJECT_SESSION_ONLY 10000 + +extern int g_errorInsert; + class ConfigInfoServer; class NdbApiSignal; class Config; @@ -49,6 +53,7 @@ class Ndb_mgmd_event_service : public EventLoggerBase friend class MgmtSrvr; public: struct Event_listener : public EventLoggerBase { + Event_listener() {} NDB_SOCKET_TYPE m_socket; Uint32 m_parsable; }; @@ -143,45 +148,6 @@ public: */ bool isEventLogFilterEnabled(int severity); - STATIC_CONST( NO_CONTACT_WITH_PROCESS = 5000 ); - STATIC_CONST( PROCESS_NOT_CONFIGURED = 5001 ); - STATIC_CONST( WRONG_PROCESS_TYPE = 5002 ); - STATIC_CONST( COULD_NOT_ALLOCATE_MEMORY = 5003 ); - STATIC_CONST( SEND_OR_RECEIVE_FAILED = 5005 ); - STATIC_CONST( INVALID_LEVEL = 5006 ); - STATIC_CONST( INVALID_ERROR_NUMBER = 5007 ); - STATIC_CONST( INVALID_TRACE_NUMBER = 5008 ); - STATIC_CONST( NOT_IMPLEMENTED = 5009 ); - STATIC_CONST( INVALID_BLOCK_NAME = 5010 ); - - STATIC_CONST( CONFIG_PARAM_NOT_EXIST = 5011 ); - STATIC_CONST( CONFIG_PARAM_NOT_UPDATEABLE = 5012 ); - STATIC_CONST( VALUE_WRONG_FORMAT_INT_EXPECTED = 5013 ); - STATIC_CONST( VALUE_TOO_LOW = 5014 ); - STATIC_CONST( VALUE_TOO_HIGH = 5015 ); - STATIC_CONST( VALUE_WRONG_FORMAT_BOOL_EXPECTED = 5016 ); - - STATIC_CONST( CONFIG_FILE_OPEN_WRITE_ERROR = 5017 ); - STATIC_CONST( CONFIG_FILE_OPEN_READ_ERROR = 5018 ); - STATIC_CONST( CONFIG_FILE_WRITE_ERROR = 5019 ); - STATIC_CONST( CONFIG_FILE_READ_ERROR = 5020 ); - STATIC_CONST( CONFIG_FILE_CLOSE_ERROR = 5021 ); - - STATIC_CONST( CONFIG_CHANGE_REFUSED_BY_RECEIVER = 5022 ); - STATIC_CONST( COULD_NOT_SYNC_CONFIG_CHANGE_AGAINST_PHYSICAL_MEDIUM = 5023 ); - STATIC_CONST( CONFIG_FILE_CHECKSUM_ERROR = 5024 ); - STATIC_CONST( NOT_POSSIBLE_TO_SEND_CONFIG_UPDATE_TO_PROCESS_TYPE = 5025 ); - - STATIC_CONST( NODE_SHUTDOWN_IN_PROGESS = 5026 ); - STATIC_CONST( SYSTEM_SHUTDOWN_IN_PROGRESS = 5027 ); - STATIC_CONST( NODE_SHUTDOWN_WOULD_CAUSE_SYSTEM_CRASH = 5028 ); - - STATIC_CONST( NO_CONTACT_WITH_DB_NODES = 5030 ); - STATIC_CONST( UNSUPPORTED_NODE_SHUTDOWN = 5031 ); - - STATIC_CONST( NODE_NOT_API_NODE = 5062 ); - STATIC_CONST( OPERATION_NOT_ALLOWED_START_STOP = 5063 ); - /** * This enum specifies the different signal loggig modes possible to set * with the setSignalLoggingMode method. 
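The injection points added in this file and in Services.cpp follow the numbering scheme of the new ERROR_codes.txt: codes below MGM_ERROR_MAX_INJECT_SESSION_ONLY (10000) are matched against the per-session counter, higher codes against the global g_errorInsert. A hedged sketch of how a timeout test point is exercised; the two macros are the ones this patch defines, while the handler name and reply text here are hypothetical:

  // From Services.cpp in this patch:
  //   #define ERROR_INSERTED(x) (g_errorInsert == x || m_errorInsert == x)
  //   #define SLEEP_ERROR_INSERTED(x) if(ERROR_INSERTED(x)){NdbSleep_SecSleep(10);}
  void MgmApiSession::someReply()   // hypothetical call site
  {
    SLEEP_ERROR_INSERTED(4);        // error 4: "end session, sleep before reply"
    m_output->println("reply");     // the injected 10 s sleep lets tests exercise
                                    // the 30000 ms stream timeouts set in the ctor
  }
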
@@ -593,7 +559,6 @@ private: */ enum WaitSignalType { NO_WAIT, // We don't expect to receive any signal - WAIT_SET_VAR, // Accept SET_VAR_CONF and SET_VAR_REF WAIT_SUBSCRIBE_CONF // Accept event subscription confirmation }; diff --git a/storage/ndb/src/mgmsrv/ParamInfo.cpp b/storage/ndb/src/mgmsrv/ParamInfo.cpp deleted file mode 100644 index 4a4e80eb079..00000000000 --- a/storage/ndb/src/mgmsrv/ParamInfo.cpp +++ /dev/null @@ -1,2076 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -#include <ndb_global.h> -#include <../../include/kernel/ndb_limits.h> -#include "ParamInfo.hpp" -#include <mgmapi_config_parameters.h> - -#ifndef MYSQLCLUSTERDIR -#define MYSQLCLUSTERDIR "." -#endif - -#define KEY_INTERNAL 0 -#define MAX_INT_RNIL 0xfffffeff -#define MAX_PORT_NO 65535 - -#define _STR_VALUE(x) #x -#define STR_VALUE(x) _STR_VALUE(x) - -/**************************************************************************** - * Section names - ****************************************************************************/ -#define DB_TOKEN_PRINT "ndbd(DB)" -#define MGM_TOKEN_PRINT "ndb_mgmd(MGM)" -#define API_TOKEN_PRINT "mysqld(API)" - -/** - * A MANDATORY parameters must be specified in the config file - * An UNDEFINED parameter may or may not be specified in the config file - */ -static const char* MANDATORY = (char*)~(UintPtr)0;// Default value for mandatory params. -static const char* UNDEFINED = 0; // Default value for undefined params. - -extern const ParamInfo ParamInfoArray[]; -extern const int ParamInfoNum; - -/** - * The default constructors create objects with suitable values for the - * configuration parameters. - * - * Some are however given the value MANDATORY which means that the value - * must be specified in the configuration file. - * - * Min and max values are also given for some parameters. - * - Attr1: Name in file (initial config file) - * - Attr2: Name in prop (properties object) - * - Attr3: Name of Section (in init config file) - * - Attr4: Updateable - * - Attr5: Type of parameter (INT or BOOL) - * - Attr6: Default Value (number only) - * - Attr7: Min value - * - Attr8: Max value - * - * Parameter constraints are coded in file Config.cpp. 
- * - * ******************************************************************* - * Parameters used under development should be marked "NOTIMPLEMENTED" - * ******************************************************************* - */ -const ParamInfo ParamInfoArray[] = { - - /**************************************************************************** - * COMPUTER - ***************************************************************************/ - { - KEY_INTERNAL, - "COMPUTER", - "COMPUTER", - "Computer section", - CI_INTERNAL, - false, - CI_SECTION, - 0, - 0, 0 }, - - { - KEY_INTERNAL, - "Id", - "COMPUTER", - "Name of computer", - CI_USED, - false, - CI_STRING, - MANDATORY, - 0, 0 }, - - { - KEY_INTERNAL, - "HostName", - "COMPUTER", - "Hostname of computer (e.g. mysql.com)", - CI_USED, - false, - CI_STRING, - MANDATORY, - 0, 0 }, - - { - KEY_INTERNAL, - "ByteOrder", - "COMPUTER", - 0, - CI_DEPRICATED, - false, - CI_STRING, - UNDEFINED, - 0, - 0 }, - - /**************************************************************************** - * SYSTEM - ***************************************************************************/ - { - CFG_SECTION_SYSTEM, - "SYSTEM", - "SYSTEM", - "System section", - CI_USED, - false, - CI_SECTION, - (const char *)CFG_SECTION_SYSTEM, - 0, 0 }, - - { - CFG_SYS_NAME, - "Name", - "SYSTEM", - "Name of system (NDB Cluster)", - CI_USED, - false, - CI_STRING, - MANDATORY, - 0, 0 }, - - { - CFG_SYS_PRIMARY_MGM_NODE, - "PrimaryMGMNode", - "SYSTEM", - "Node id of Primary "MGM_TOKEN_PRINT" node", - CI_USED, - false, - CI_INT, - "0", - "0", - STR_VALUE(MAX_INT_RNIL) }, - - { - CFG_SYS_CONFIG_GENERATION, - "ConfigGenerationNumber", - "SYSTEM", - "Configuration generation number", - CI_USED, - false, - CI_INT, - "0", - "0", - STR_VALUE(MAX_INT_RNIL) }, - - /*************************************************************************** - * DB - ***************************************************************************/ - { - CFG_SECTION_NODE, - DB_TOKEN, - DB_TOKEN, - "Node section", - CI_USED, - false, - CI_SECTION, - (const char *)NODE_TYPE_DB, - 0, 0 - }, - - { - CFG_NODE_HOST, - "HostName", - DB_TOKEN, - "Name of computer for this node", - CI_INTERNAL, - false, - CI_STRING, - "localhost", - 0, 0 }, - - { - CFG_NODE_SYSTEM, - "System", - DB_TOKEN, - "Name of system for this node", - CI_INTERNAL, - false, - CI_STRING, - UNDEFINED, - 0, 0 }, - - { - KEY_INTERNAL, - "Id", - DB_TOKEN, - "", - CI_DEPRICATED, - false, - CI_INT, - MANDATORY, - "1", - STR_VALUE(MAX_NODES) }, - - { - CFG_NODE_ID, - "NodeId", - DB_TOKEN, - "Number identifying the database node ("DB_TOKEN_PRINT")", - CI_USED, - false, - CI_INT, - MANDATORY, - "1", - STR_VALUE(MAX_NODES) }, - - { - KEY_INTERNAL, - "ServerPort", - DB_TOKEN, - "Port used to setup transporter", - CI_USED, - false, - CI_INT, - UNDEFINED, - "1", - STR_VALUE(MAX_PORT_NO) }, - - { - CFG_DB_NO_REPLICAS, - "NoOfReplicas", - DB_TOKEN, - "Number of copies of all data in the database (1-4)", - CI_USED, - false, - CI_INT, - MANDATORY, - "1", - "4" }, - - { - CFG_DB_NO_ATTRIBUTES, - "MaxNoOfAttributes", - DB_TOKEN, - "Total number of attributes stored in database. I.e. 
sum over all tables", - CI_USED, - false, - CI_INT, - "1000", - "32", - STR_VALUE(MAX_INT_RNIL) }, - - { - CFG_DB_NO_TABLES, - "MaxNoOfTables", - DB_TOKEN, - "Total number of tables stored in the database", - CI_USED, - false, - CI_INT, - "128", - "8", - STR_VALUE(MAX_INT_RNIL) }, - - { - CFG_DB_NO_ORDERED_INDEXES, - "MaxNoOfOrderedIndexes", - DB_TOKEN, - "Total number of ordered indexes that can be defined in the system", - CI_USED, - false, - CI_INT, - "128", - "0", - STR_VALUE(MAX_INT_RNIL) }, - - { - CFG_DB_NO_UNIQUE_HASH_INDEXES, - "MaxNoOfUniqueHashIndexes", - DB_TOKEN, - "Total number of unique hash indexes that can be defined in the system", - CI_USED, - false, - CI_INT, - "64", - "0", - STR_VALUE(MAX_INT_RNIL) }, - - { - CFG_DB_NO_INDEXES, - "MaxNoOfIndexes", - DB_TOKEN, - "Total number of indexes that can be defined in the system", - CI_DEPRICATED, - false, - CI_INT, - "128", - "0", - STR_VALUE(MAX_INT_RNIL) }, - - { - CFG_DB_NO_INDEX_OPS, - "MaxNoOfConcurrentIndexOperations", - DB_TOKEN, - "Total number of index operations that can execute simultaneously on one "DB_TOKEN_PRINT" node", - CI_USED, - false, - CI_INT, - "8K", - "0", - STR_VALUE(MAX_INT_RNIL) - }, - - { - CFG_DB_NO_TRIGGERS, - "MaxNoOfTriggers", - DB_TOKEN, - "Total number of triggers that can be defined in the system", - CI_USED, - false, - CI_INT, - "768", - "0", - STR_VALUE(MAX_INT_RNIL) }, - - { - CFG_DB_NO_TRIGGER_OPS, - "MaxNoOfFiredTriggers", - DB_TOKEN, - "Total number of triggers that can fire simultaneously in one "DB_TOKEN_PRINT" node", - CI_USED, - false, - CI_INT, - "4000", - "0", - STR_VALUE(MAX_INT_RNIL) }, - - { - KEY_INTERNAL, - "ExecuteOnComputer", - DB_TOKEN, - "String referencing an earlier defined COMPUTER", - CI_USED, - false, - CI_STRING, - UNDEFINED, - 0, 0 }, - - { - CFG_DB_NO_SAVE_MSGS, - "MaxNoOfSavedMessages", - DB_TOKEN, - "Max number of error messages in error log and max number of trace files", - CI_USED, - true, - CI_INT, - "25", - "0", - STR_VALUE(MAX_INT_RNIL) }, - - { - CFG_DB_MEMLOCK, - "LockPagesInMainMemory", - DB_TOKEN, - "If set to yes, then NDB Cluster data will not be swapped out to disk", - CI_USED, - true, - CI_BOOL, - "false", - "false", - "true" }, - - { - CFG_DB_WATCHDOG_INTERVAL, - "TimeBetweenWatchDogCheck", - DB_TOKEN, - "Time between execution checks inside a database node", - CI_USED, - true, - CI_INT, - "6000", - "70", - STR_VALUE(MAX_INT_RNIL) }, - - { - CFG_DB_STOP_ON_ERROR, - "StopOnError", - DB_TOKEN, - "If set to N, "DB_TOKEN_PRINT" automatically restarts/recovers in case of node failure", - CI_USED, - true, - CI_BOOL, - "true", - "false", - "true" }, - - { - CFG_DB_STOP_ON_ERROR_INSERT, - "RestartOnErrorInsert", - DB_TOKEN, - "See src/kernel/vm/Emulator.hpp NdbRestartType for details", - CI_INTERNAL, - true, - CI_INT, - "2", - "0", - "4" }, - - { - CFG_DB_NO_OPS, - "MaxNoOfConcurrentOperations", - DB_TOKEN, - "Max number of operation records in transaction coordinator", - CI_USED, - false, - CI_INT, - "32k", - "32", - STR_VALUE(MAX_INT_RNIL) }, - - { - CFG_DB_NO_LOCAL_OPS, - "MaxNoOfLocalOperations", - DB_TOKEN, - "Max number of operation records defined in the local storage node", - CI_USED, - false, - CI_INT, - UNDEFINED, - "32", - STR_VALUE(MAX_INT_RNIL) }, - - { - CFG_DB_NO_LOCAL_SCANS, - "MaxNoOfLocalScans", - DB_TOKEN, - "Max number of fragment scans in parallel in the local storage node", - CI_USED, - false, - CI_INT, - UNDEFINED, - "32", - STR_VALUE(MAX_INT_RNIL) }, - - { - CFG_DB_BATCH_SIZE, - "BatchSizePerLocalScan", - DB_TOKEN, - "Used to calculate 
the number of lock records for scan with hold lock", - CI_USED, - false, - CI_INT, - STR_VALUE(DEF_BATCH_SIZE), - "1", - STR_VALUE(MAX_PARALLEL_OP_PER_SCAN) }, - - { - CFG_DB_NO_TRANSACTIONS, - "MaxNoOfConcurrentTransactions", - DB_TOKEN, - "Max number of transaction executing concurrently on the "DB_TOKEN_PRINT" node", - CI_USED, - false, - CI_INT, - "4096", - "32", - STR_VALUE(MAX_INT_RNIL) }, - - { - CFG_DB_NO_SCANS, - "MaxNoOfConcurrentScans", - DB_TOKEN, - "Max number of scans executing concurrently on the "DB_TOKEN_PRINT" node", - CI_USED, - false, - CI_INT, - "256", - "2", - "500" }, - - { - CFG_DB_TRANS_BUFFER_MEM, - "TransactionBufferMemory", - DB_TOKEN, - "Dynamic buffer space (in bytes) for key and attribute data allocated for each "DB_TOKEN_PRINT" node", - CI_USED, - false, - CI_INT, - "1M", - "1K", - STR_VALUE(MAX_INT_RNIL) }, - - { - CFG_DB_INDEX_MEM, - "IndexMemory", - DB_TOKEN, - "Number bytes on each "DB_TOKEN_PRINT" node allocated for storing indexes", - CI_USED, - false, - CI_INT64, - "18M", - "1M", - "1024G" }, - - { - CFG_DB_DATA_MEM, - "DataMemory", - DB_TOKEN, - "Number bytes on each "DB_TOKEN_PRINT" node allocated for storing data", - CI_USED, - false, - CI_INT64, - "80M", - "1M", - "1024G" }, - - { - CFG_DB_UNDO_INDEX_BUFFER, - "UndoIndexBuffer", - DB_TOKEN, - "Number bytes on each "DB_TOKEN_PRINT" node allocated for writing UNDO logs for index part", - CI_USED, - false, - CI_INT, - "2M", - "1M", - STR_VALUE(MAX_INT_RNIL)}, - - { - CFG_DB_UNDO_DATA_BUFFER, - "UndoDataBuffer", - DB_TOKEN, - "Number bytes on each "DB_TOKEN_PRINT" node allocated for writing UNDO logs for data part", - CI_USED, - false, - CI_INT, - "16M", - "1M", - STR_VALUE(MAX_INT_RNIL)}, - - { - CFG_DB_REDO_BUFFER, - "RedoBuffer", - DB_TOKEN, - "Number bytes on each "DB_TOKEN_PRINT" node allocated for writing REDO logs", - CI_USED, - false, - CI_INT, - "8M", - "1M", - STR_VALUE(MAX_INT_RNIL)}, - - { - CFG_DB_LONG_SIGNAL_BUFFER, - "LongMessageBuffer", - DB_TOKEN, - "Number bytes on each "DB_TOKEN_PRINT" node allocated for internal long messages", - CI_USED, - false, - CI_INT, - "1M", - "512k", - STR_VALUE(MAX_INT_RNIL)}, - - { - CFG_DB_DISK_PAGE_BUFFER_MEMORY, - "DiskPageBufferMemory", - DB_TOKEN, - "Number bytes on each "DB_TOKEN_PRINT" node allocated for disk page buffer cache", - CI_USED, - false, - CI_INT64, - "64M", - "4M", - "1024G" }, - - { - CFG_DB_SGA, - "SharedGlobalMemory", - DB_TOKEN, - "Total number bytes on each "DB_TOKEN_PRINT" node allocated for any use", - CI_USED, - false, - CI_INT64, - "20M", - "0", - "65536G" }, // 32k pages * 32-bit i value - - { - CFG_DB_START_PARTIAL_TIMEOUT, - "StartPartialTimeout", - DB_TOKEN, - "Time to wait before trying to start wo/ all nodes. 0=Wait forever", - CI_USED, - true, - CI_INT, - "30000", - "0", - STR_VALUE(MAX_INT_RNIL) }, - - { - CFG_DB_START_PARTITION_TIMEOUT, - "StartPartitionedTimeout", - DB_TOKEN, - "Time to wait before trying to start partitioned. 0=Wait forever", - CI_USED, - true, - CI_INT, - "60000", - "0", - STR_VALUE(MAX_INT_RNIL) }, - - { - CFG_DB_START_FAILURE_TIMEOUT, - "StartFailureTimeout", - DB_TOKEN, - "Time to wait before terminating. 0=Wait forever", - CI_USED, - true, - CI_INT, - "0", - "0", - STR_VALUE(MAX_INT_RNIL) }, - - { - CFG_DB_HEARTBEAT_INTERVAL, - "HeartbeatIntervalDbDb", - DB_TOKEN, - "Time between "DB_TOKEN_PRINT"-"DB_TOKEN_PRINT" heartbeats. 
"DB_TOKEN_PRINT" considered dead after 3 missed HBs", - CI_USED, - true, - CI_INT, - "1500", - "10", - STR_VALUE(MAX_INT_RNIL) }, - - { - CFG_DB_API_HEARTBEAT_INTERVAL, - "HeartbeatIntervalDbApi", - DB_TOKEN, - "Time between "API_TOKEN_PRINT"-"DB_TOKEN_PRINT" heartbeats. "API_TOKEN_PRINT" connection closed after 3 missed HBs", - CI_USED, - true, - CI_INT, - "1500", - "100", - STR_VALUE(MAX_INT_RNIL) }, - - { - CFG_DB_LCP_INTERVAL, - "TimeBetweenLocalCheckpoints", - DB_TOKEN, - "Time between taking snapshots of the database (expressed in 2log of bytes)", - CI_USED, - true, - CI_INT, - "20", - "0", - "31" }, - - { - CFG_DB_GCP_INTERVAL, - "TimeBetweenGlobalCheckpoints", - DB_TOKEN, - "Time between doing group commit of transactions to disk", - CI_USED, - true, - CI_INT, - "2000", - "10", - "32000" }, - - { - CFG_DB_NO_REDOLOG_FILES, - "NoOfFragmentLogFiles", - DB_TOKEN, - "No of 16 Mbyte Redo log files in each of 4 file sets belonging to "DB_TOKEN_PRINT" node", - CI_USED, - false, - CI_INT, - "16", - "3", - STR_VALUE(MAX_INT_RNIL) }, - - { - CFG_DB_MAX_OPEN_FILES, - "MaxNoOfOpenFiles", - DB_TOKEN, - "Max number of files open per "DB_TOKEN_PRINT" node.(One thread is created per file)", - CI_USED, - false, - CI_INT, - "40", - "20", - STR_VALUE(MAX_INT_RNIL) }, - - { - CFG_DB_INITIAL_OPEN_FILES, - "InitialNoOfOpenFiles", - DB_TOKEN, - "Initial number of files open per "DB_TOKEN_PRINT" node.(One thread is created per file)", - CI_USED, - false, - CI_INT, - "27", - "20", - STR_VALUE(MAX_INT_RNIL) }, - - { - CFG_DB_TRANSACTION_CHECK_INTERVAL, - "TimeBetweenInactiveTransactionAbortCheck", - DB_TOKEN, - "Time between inactive transaction checks", - CI_USED, - true, - CI_INT, - "1000", - "1000", - STR_VALUE(MAX_INT_RNIL) }, - - { - CFG_DB_TRANSACTION_INACTIVE_TIMEOUT, - "TransactionInactiveTimeout", - DB_TOKEN, - "Time application can wait before executing another transaction part (ms).\n" - "This is the time the transaction coordinator waits for the application\n" - "to execute or send another part (query, statement) of the transaction.\n" - "If the application takes too long time, the transaction gets aborted.\n" - "Timeout set to 0 means that we don't timeout at all on application wait.", - CI_USED, - true, - CI_INT, - STR_VALUE(MAX_INT_RNIL), - "0", - STR_VALUE(MAX_INT_RNIL) }, - - { - CFG_DB_TRANSACTION_DEADLOCK_TIMEOUT, - "TransactionDeadlockDetectionTimeout", - DB_TOKEN, - "Time transaction can be executing in a DB node (ms).\n" - "This is the time the transaction coordinator waits for each database node\n" - "of the transaction to execute a request. 
If the database node takes too\n" - "long time, the transaction gets aborted.", - CI_USED, - true, - CI_INT, - "1200", - "50", - STR_VALUE(MAX_INT_RNIL) }, - - { - CFG_DB_LCP_DISC_PAGES_TUP_SR, - "NoOfDiskPagesToDiskDuringRestartTUP", - DB_TOKEN, - "DiskCheckpointSpeedSr", - CI_DEPRICATED, - true, - CI_INT, - "40", - "1", - STR_VALUE(MAX_INT_RNIL) }, - - { - CFG_DB_LCP_DISC_PAGES_TUP, - "NoOfDiskPagesToDiskAfterRestartTUP", - DB_TOKEN, - "DiskCheckpointSpeed", - CI_DEPRICATED, - true, - CI_INT, - "40", - "1", - STR_VALUE(MAX_INT_RNIL) }, - - { - CFG_DB_LCP_DISC_PAGES_ACC_SR, - "NoOfDiskPagesToDiskDuringRestartACC", - DB_TOKEN, - "DiskCheckpointSpeedSr", - CI_DEPRICATED, - true, - CI_INT, - "20", - "1", - STR_VALUE(MAX_INT_RNIL) }, - - { - CFG_DB_LCP_DISC_PAGES_ACC, - "NoOfDiskPagesToDiskAfterRestartACC", - DB_TOKEN, - "DiskCheckpointSpeed", - CI_DEPRICATED, - true, - CI_INT, - "20", - "1", - STR_VALUE(MAX_INT_RNIL) }, - - - { - CFG_DB_DISCLESS, - "Diskless", - DB_TOKEN, - "Run wo/ disk", - CI_USED, - true, - CI_BOOL, - "false", - "false", - "true"}, - - { - KEY_INTERNAL, - "Discless", - DB_TOKEN, - "Diskless", - CI_DEPRICATED, - true, - CI_BOOL, - "false", - "false", - "true"}, - - - - { - CFG_DB_ARBIT_TIMEOUT, - "ArbitrationTimeout", - DB_TOKEN, - "Max time (milliseconds) database partion waits for arbitration signal", - CI_USED, - false, - CI_INT, - "3000", - "10", - STR_VALUE(MAX_INT_RNIL) }, - - { - CFG_NODE_DATADIR, - "DataDir", - DB_TOKEN, - "Data directory for this node", - CI_USED, - false, - CI_STRING, - MYSQLCLUSTERDIR, - 0, 0 }, - - { - CFG_DB_FILESYSTEM_PATH, - "FileSystemPath", - DB_TOKEN, - "Path to directory where the "DB_TOKEN_PRINT" node stores its data (directory must exist)", - CI_USED, - false, - CI_STRING, - UNDEFINED, - 0, 0 }, - - { - CFG_LOGLEVEL_STARTUP, - "LogLevelStartup", - DB_TOKEN, - "Node startup info printed on stdout", - CI_USED, - false, - CI_INT, - "1", - "0", - "15" }, - - { - CFG_LOGLEVEL_SHUTDOWN, - "LogLevelShutdown", - DB_TOKEN, - "Node shutdown info printed on stdout", - CI_USED, - false, - CI_INT, - "0", - "0", - "15" }, - - { - CFG_LOGLEVEL_STATISTICS, - "LogLevelStatistic", - DB_TOKEN, - "Transaction, operation, transporter info printed on stdout", - CI_USED, - false, - CI_INT, - "0", - "0", - "15" }, - - { - CFG_LOGLEVEL_CHECKPOINT, - "LogLevelCheckpoint", - DB_TOKEN, - "Local and Global checkpoint info printed on stdout", - CI_USED, - false, - CI_INT, - "0", - "0", - "15" }, - - { - CFG_LOGLEVEL_NODERESTART, - "LogLevelNodeRestart", - DB_TOKEN, - "Node restart, node failure info printed on stdout", - CI_USED, - false, - CI_INT, - "0", - "0", - "15" }, - - { - CFG_LOGLEVEL_CONNECTION, - "LogLevelConnection", - DB_TOKEN, - "Node connect/disconnect info printed on stdout", - CI_USED, - false, - CI_INT, - "0", - "0", - "15" }, - - { - CFG_LOGLEVEL_CONGESTION, - "LogLevelCongestion", - DB_TOKEN, - "Congestion info printed on stdout", - CI_USED, - false, - CI_INT, - "0", - "0", - "15" }, - - { - CFG_LOGLEVEL_ERROR, - "LogLevelError", - DB_TOKEN, - "Transporter, heartbeat errors printed on stdout", - CI_USED, - false, - CI_INT, - "0", - "0", - "15" }, - - { - CFG_LOGLEVEL_INFO, - "LogLevelInfo", - DB_TOKEN, - "Heartbeat and log info printed on stdout", - CI_USED, - false, - CI_INT, - "0", - "0", - "15" }, - - /** - * Backup - */ - { - CFG_DB_PARALLEL_BACKUPS, - "ParallelBackups", - DB_TOKEN, - "Maximum number of parallel backups", - CI_NOTIMPLEMENTED, - false, - CI_INT, - "1", - "1", - "1" }, - - { - CFG_DB_BACKUP_DATADIR, - "BackupDataDir", - 
DB_TOKEN, - "Path to where to store backups", - CI_USED, - false, - CI_STRING, - UNDEFINED, - 0, 0 }, - - { - CFG_DB_DISK_SYNCH_SIZE, - "DiskSyncSize", - DB_TOKEN, - "Data written to a file before a synch is forced", - CI_USED, - false, - CI_INT, - "4M", - "32k", - STR_VALUE(MAX_INT_RNIL) }, - - { - CFG_DB_CHECKPOINT_SPEED, - "DiskCheckpointSpeed", - DB_TOKEN, - "Bytes per second allowed to be written by checkpoint", - CI_USED, - false, - CI_INT, - "10M", - "1M", - STR_VALUE(MAX_INT_RNIL) }, - - { - CFG_DB_CHECKPOINT_SPEED_SR, - "DiskCheckpointSpeedInRestart", - DB_TOKEN, - "Bytes per second allowed to be written by checkpoint during restart", - CI_USED, - false, - CI_INT, - "100M", - "1M", - STR_VALUE(MAX_INT_RNIL) }, - - { - CFG_DB_BACKUP_MEM, - "BackupMemory", - DB_TOKEN, - "Total memory allocated for backups per node (in bytes)", - CI_USED, - false, - CI_INT, - "4M", // sum of BackupDataBufferSize and BackupLogBufferSize - "0", - STR_VALUE(MAX_INT_RNIL) }, - - { - CFG_DB_BACKUP_DATA_BUFFER_MEM, - "BackupDataBufferSize", - DB_TOKEN, - "Default size of databuffer for a backup (in bytes)", - CI_USED, - false, - CI_INT, - "2M", // remember to change BackupMemory - "0", - STR_VALUE(MAX_INT_RNIL) }, - - { - CFG_DB_BACKUP_LOG_BUFFER_MEM, - "BackupLogBufferSize", - DB_TOKEN, - "Default size of logbuffer for a backup (in bytes)", - CI_USED, - false, - CI_INT, - "2M", // remember to change BackupMemory - "0", - STR_VALUE(MAX_INT_RNIL) }, - - { - CFG_DB_BACKUP_WRITE_SIZE, - "BackupWriteSize", - DB_TOKEN, - "Default size of filesystem writes made by backup (in bytes)", - CI_USED, - false, - CI_INT, - "32K", - "2K", - STR_VALUE(MAX_INT_RNIL) }, - - { - CFG_DB_BACKUP_MAX_WRITE_SIZE, - "BackupMaxWriteSize", - DB_TOKEN, - "Max size of filesystem writes made by backup (in bytes)", - CI_USED, - false, - CI_INT, - "256K", - "2K", - STR_VALUE(MAX_INT_RNIL) }, - - { - CFG_DB_STRING_MEMORY, - "StringMemory", - DB_TOKEN, - "Default size of string memory (0 -> 5% of max 1-100 -> %of max, >100 -> actual bytes)", - CI_USED, - false, - CI_INT, - "0", - "0", - STR_VALUE(MAX_INT_RNIL) }, - - /*************************************************************************** - * API - ***************************************************************************/ - { - CFG_SECTION_NODE, - API_TOKEN, - API_TOKEN, - "Node section", - CI_USED, - false, - CI_SECTION, - (const char *)NODE_TYPE_API, - 0, 0 - }, - - { - CFG_NODE_HOST, - "HostName", - API_TOKEN, - "Name of computer for this node", - CI_INTERNAL, - false, - CI_STRING, - "", - 0, 0 }, - - { - CFG_NODE_SYSTEM, - "System", - API_TOKEN, - "Name of system for this node", - CI_INTERNAL, - false, - CI_STRING, - UNDEFINED, - 0, 0 }, - - { - KEY_INTERNAL, - "Id", - API_TOKEN, - "", - CI_DEPRICATED, - false, - CI_INT, - MANDATORY, - "1", - STR_VALUE(MAX_NODES) }, - - { - CFG_NODE_ID, - "NodeId", - API_TOKEN, - "Number identifying application node ("API_TOKEN_PRINT")", - CI_USED, - false, - CI_INT, - MANDATORY, - "1", - STR_VALUE(MAX_NODES) }, - - { - KEY_INTERNAL, - "ExecuteOnComputer", - API_TOKEN, - "String referencing an earlier defined COMPUTER", - CI_USED, - false, - CI_STRING, - UNDEFINED, - 0, 0 }, - - { - CFG_NODE_ARBIT_RANK, - "ArbitrationRank", - API_TOKEN, - "If 0, then "API_TOKEN_PRINT" is not arbitrator. 
Kernel selects arbitrators in order 1, 2", - CI_USED, - false, - CI_INT, - "0", - "0", - "2" }, - - { - CFG_NODE_ARBIT_DELAY, - "ArbitrationDelay", - API_TOKEN, - "When asked to arbitrate, arbitrator waits this long before voting (msec)", - CI_USED, - false, - CI_INT, - "0", - "0", - STR_VALUE(MAX_INT_RNIL) }, - - { - CFG_MAX_SCAN_BATCH_SIZE, - "MaxScanBatchSize", - "API", - "The maximum collective batch size for one scan", - CI_USED, - false, - CI_INT, - STR_VALUE(MAX_SCAN_BATCH_SIZE), - "32k", - "16M" }, - - { - CFG_BATCH_BYTE_SIZE, - "BatchByteSize", - "API", - "The default batch size in bytes", - CI_USED, - false, - CI_INT, - STR_VALUE(SCAN_BATCH_SIZE), - "1k", - "1M" }, - - { - CFG_BATCH_SIZE, - "BatchSize", - "API", - "The default batch size in number of records", - CI_USED, - false, - CI_INT, - STR_VALUE(DEF_BATCH_SIZE), - "1", - STR_VALUE(MAX_PARALLEL_OP_PER_SCAN) }, - - /**************************************************************************** - * MGM - ***************************************************************************/ - { - CFG_SECTION_NODE, - MGM_TOKEN, - MGM_TOKEN, - "Node section", - CI_USED, - false, - CI_SECTION, - (const char *)NODE_TYPE_MGM, - 0, 0 - }, - - { - CFG_NODE_HOST, - "HostName", - MGM_TOKEN, - "Name of computer for this node", - CI_INTERNAL, - false, - CI_STRING, - "", - 0, 0 }, - - { - CFG_NODE_DATADIR, - "DataDir", - MGM_TOKEN, - "Data directory for this node", - CI_USED, - false, - CI_STRING, - MYSQLCLUSTERDIR, - 0, 0 }, - - { - CFG_NODE_SYSTEM, - "System", - MGM_TOKEN, - "Name of system for this node", - CI_INTERNAL, - false, - CI_STRING, - UNDEFINED, - 0, 0 }, - - { - KEY_INTERNAL, - "Id", - MGM_TOKEN, - "", - CI_DEPRICATED, - false, - CI_INT, - MANDATORY, - "1", - STR_VALUE(MAX_NODES) }, - - { - CFG_NODE_ID, - "NodeId", - MGM_TOKEN, - "Number identifying the management server node ("MGM_TOKEN_PRINT")", - CI_USED, - false, - CI_INT, - MANDATORY, - "1", - STR_VALUE(MAX_NODES) }, - - { - CFG_LOG_DESTINATION, - "LogDestination", - MGM_TOKEN, - "String describing where logmessages are sent", - CI_USED, - false, - CI_STRING, - 0, - 0, 0 }, - - { - KEY_INTERNAL, - "ExecuteOnComputer", - MGM_TOKEN, - "String referencing an earlier defined COMPUTER", - CI_USED, - false, - CI_STRING, - 0, - 0, 0 }, - - { - KEY_INTERNAL, - "MaxNoOfSavedEvents", - MGM_TOKEN, - "", - CI_USED, - false, - CI_INT, - "100", - "0", - STR_VALUE(MAX_INT_RNIL) }, - - { - CFG_MGM_PORT, - "PortNumber", - MGM_TOKEN, - "Port number to give commands to/fetch configurations from management server", - CI_USED, - false, - CI_INT, - NDB_PORT, - "0", - STR_VALUE(MAX_PORT_NO) }, - - { - KEY_INTERNAL, - "PortNumberStats", - MGM_TOKEN, - "Port number used to get statistical information from a management server", - CI_USED, - false, - CI_INT, - UNDEFINED, - "0", - STR_VALUE(MAX_PORT_NO) }, - - { - CFG_NODE_ARBIT_RANK, - "ArbitrationRank", - MGM_TOKEN, - "If 0, then "MGM_TOKEN_PRINT" is not arbitrator. 
Kernel selects arbitrators in order 1, 2", - CI_USED, - false, - CI_INT, - "1", - "0", - "2" }, - - { - CFG_NODE_ARBIT_DELAY, - "ArbitrationDelay", - MGM_TOKEN, - "", - CI_USED, - false, - CI_INT, - "0", - "0", - STR_VALUE(MAX_INT_RNIL) }, - - /**************************************************************************** - * TCP - ***************************************************************************/ - { - CFG_SECTION_CONNECTION, - "TCP", - "TCP", - "Connection section", - CI_USED, - false, - CI_SECTION, - (const char *)CONNECTION_TYPE_TCP, - 0, 0 - }, - - { - CFG_CONNECTION_HOSTNAME_1, - "HostName1", - "TCP", - "Name/IP of computer on one side of the connection", - CI_INTERNAL, - false, - CI_STRING, - UNDEFINED, - 0, 0 }, - - { - CFG_CONNECTION_HOSTNAME_2, - "HostName2", - "TCP", - "Name/IP of computer on one side of the connection", - CI_INTERNAL, - false, - CI_STRING, - UNDEFINED, - 0, 0 }, - - { - CFG_CONNECTION_NODE_1, - "NodeId1", - "TCP", - "Id of node ("DB_TOKEN_PRINT", "API_TOKEN_PRINT" or "MGM_TOKEN_PRINT") on one side of the connection", - CI_USED, - false, - CI_STRING, - MANDATORY, - 0, 0 }, - - { - CFG_CONNECTION_NODE_2, - "NodeId2", - "TCP", - "Id of node ("DB_TOKEN_PRINT", "API_TOKEN_PRINT" or "MGM_TOKEN_PRINT") on one side of the connection", - CI_USED, - false, - CI_STRING, - MANDATORY, - 0, 0 }, - - { - CFG_CONNECTION_GROUP, - "Group", - "TCP", - "", - CI_USED, - false, - CI_INT, - "55", - "0", "200" }, - - { - CFG_CONNECTION_NODE_ID_SERVER, - "NodeIdServer", - "TCP", - "", - CI_USED, - false, - CI_INT, - MANDATORY, - "1", "63" }, - - { - CFG_CONNECTION_SEND_SIGNAL_ID, - "SendSignalId", - "TCP", - "Sends id in each signal. Used in trace files.", - CI_USED, - false, - CI_BOOL, - "true", - "false", - "true" }, - - - { - CFG_CONNECTION_CHECKSUM, - "Checksum", - "TCP", - "If checksum is enabled, all signals between nodes are checked for errors", - CI_USED, - false, - CI_BOOL, - "false", - "false", - "true" }, - - { - CFG_CONNECTION_SERVER_PORT, - "PortNumber", - "TCP", - "Port used for this transporter", - CI_USED, - false, - CI_INT, - MANDATORY, - "0", - STR_VALUE(MAX_PORT_NO) }, - - { - CFG_TCP_SEND_BUFFER_SIZE, - "SendBufferMemory", - "TCP", - "Bytes of buffer for signals sent from this node", - CI_USED, - false, - CI_INT, - "256K", - "64K", - STR_VALUE(MAX_INT_RNIL) }, - - { - CFG_TCP_RECEIVE_BUFFER_SIZE, - "ReceiveBufferMemory", - "TCP", - "Bytes of buffer for signals received by this node", - CI_USED, - false, - CI_INT, - "64K", - "16K", - STR_VALUE(MAX_INT_RNIL) }, - - { - CFG_TCP_PROXY, - "Proxy", - "TCP", - "", - CI_USED, - false, - CI_STRING, - UNDEFINED, - 0, 0 }, - - { - CFG_CONNECTION_NODE_1_SYSTEM, - "NodeId1_System", - "TCP", - "System for node 1 in connection", - CI_INTERNAL, - false, - CI_STRING, - UNDEFINED, - 0, 0 }, - - { - CFG_CONNECTION_NODE_2_SYSTEM, - "NodeId2_System", - "TCP", - "System for node 2 in connection", - CI_INTERNAL, - false, - CI_STRING, - UNDEFINED, - 0, 0 }, - - - /**************************************************************************** - * SHM - ***************************************************************************/ - { - CFG_SECTION_CONNECTION, - "SHM", - "SHM", - "Connection section", - CI_USED, - false, - CI_SECTION, - (const char *)CONNECTION_TYPE_SHM, - 0, 0 }, - - { - CFG_CONNECTION_HOSTNAME_1, - "HostName1", - "SHM", - "Name/IP of computer on one side of the connection", - CI_INTERNAL, - false, - CI_STRING, - UNDEFINED, - 0, 0 }, - - { - CFG_CONNECTION_HOSTNAME_2, - "HostName2", - "SHM", - "Name/IP of computer on 
one side of the connection", - CI_INTERNAL, - false, - CI_STRING, - UNDEFINED, - 0, 0 }, - - { - CFG_CONNECTION_SERVER_PORT, - "PortNumber", - "SHM", - "Port used for this transporter", - CI_USED, - false, - CI_INT, - MANDATORY, - "0", - STR_VALUE(MAX_PORT_NO) }, - - { - CFG_SHM_SIGNUM, - "Signum", - "SHM", - "Signum to be used for signalling", - CI_USED, - false, - CI_INT, - UNDEFINED, - "0", - STR_VALUE(MAX_INT_RNIL) }, - - { - CFG_CONNECTION_NODE_1, - "NodeId1", - "SHM", - "Id of node ("DB_TOKEN_PRINT", "API_TOKEN_PRINT" or "MGM_TOKEN_PRINT") on one side of the connection", - CI_USED, - false, - CI_STRING, - MANDATORY, - 0, 0 }, - - { - CFG_CONNECTION_NODE_2, - "NodeId2", - "SHM", - "Id of node ("DB_TOKEN_PRINT", "API_TOKEN_PRINT" or "MGM_TOKEN_PRINT") on one side of the connection", - CI_USED, - false, - CI_STRING, - MANDATORY, - 0, 0 }, - - { - CFG_CONNECTION_GROUP, - "Group", - "SHM", - "", - CI_USED, - false, - CI_INT, - "35", - "0", "200" }, - - { - CFG_CONNECTION_NODE_ID_SERVER, - "NodeIdServer", - "SHM", - "", - CI_USED, - false, - CI_INT, - MANDATORY, - "1", "63" }, - - { - CFG_CONNECTION_SEND_SIGNAL_ID, - "SendSignalId", - "SHM", - "Sends id in each signal. Used in trace files.", - CI_USED, - false, - CI_BOOL, - "false", - "false", - "true" }, - - - { - CFG_CONNECTION_CHECKSUM, - "Checksum", - "SHM", - "If checksum is enabled, all signals between nodes are checked for errors", - CI_USED, - false, - CI_BOOL, - "true", - "false", - "true" }, - - { - CFG_SHM_KEY, - "ShmKey", - "SHM", - "A shared memory key", - CI_USED, - false, - CI_INT, - UNDEFINED, - "0", - STR_VALUE(MAX_INT_RNIL) }, - - { - CFG_SHM_BUFFER_MEM, - "ShmSize", - "SHM", - "Size of shared memory segment", - CI_USED, - false, - CI_INT, - "1M", - "64K", - STR_VALUE(MAX_INT_RNIL) }, - - { - CFG_CONNECTION_NODE_1_SYSTEM, - "NodeId1_System", - "SHM", - "System for node 1 in connection", - CI_INTERNAL, - false, - CI_STRING, - UNDEFINED, - 0, 0 }, - - { - CFG_CONNECTION_NODE_2_SYSTEM, - "NodeId2_System", - "SHM", - "System for node 2 in connection", - CI_INTERNAL, - false, - CI_STRING, - UNDEFINED, - 0, 0 }, - - /**************************************************************************** - * SCI - ***************************************************************************/ - { - CFG_SECTION_CONNECTION, - "SCI", - "SCI", - "Connection section", - CI_USED, - false, - CI_SECTION, - (const char *)CONNECTION_TYPE_SCI, - 0, 0 - }, - - { - CFG_CONNECTION_NODE_1, - "NodeId1", - "SCI", - "Id of node ("DB_TOKEN_PRINT", "API_TOKEN_PRINT" or "MGM_TOKEN_PRINT") on one side of the connection", - CI_USED, - false, - CI_STRING, - MANDATORY, - "0", - STR_VALUE(MAX_INT_RNIL) }, - - { - CFG_CONNECTION_NODE_2, - "NodeId2", - "SCI", - "Id of node ("DB_TOKEN_PRINT", "API_TOKEN_PRINT" or "MGM_TOKEN_PRINT") on one side of the connection", - CI_USED, - false, - CI_STRING, - MANDATORY, - "0", - STR_VALUE(MAX_INT_RNIL) }, - - { - CFG_CONNECTION_GROUP, - "Group", - "SCI", - "", - CI_USED, - false, - CI_INT, - "15", - "0", "200" }, - - { - CFG_CONNECTION_NODE_ID_SERVER, - "NodeIdServer", - "SCI", - "", - CI_USED, - false, - CI_INT, - MANDATORY, - "1", "63" }, - - { - CFG_CONNECTION_HOSTNAME_1, - "HostName1", - "SCI", - "Name/IP of computer on one side of the connection", - CI_INTERNAL, - false, - CI_STRING, - UNDEFINED, - 0, 0 }, - - { - CFG_CONNECTION_HOSTNAME_2, - "HostName2", - "SCI", - "Name/IP of computer on one side of the connection", - CI_INTERNAL, - false, - CI_STRING, - UNDEFINED, - 0, 0 }, - - { - CFG_CONNECTION_SERVER_PORT, - "PortNumber", 
- "SCI", - "Port used for this transporter", - CI_USED, - false, - CI_INT, - MANDATORY, - "0", - STR_VALUE(MAX_PORT_NO) }, - - { - CFG_SCI_HOST1_ID_0, - "Host1SciId0", - "SCI", - "SCI-node id for adapter 0 on Host1 (a computer can have two adapters)", - CI_USED, - false, - CI_INT, - MANDATORY, - "0", - STR_VALUE(MAX_INT_RNIL) }, - - { - CFG_SCI_HOST1_ID_1, - "Host1SciId1", - "SCI", - "SCI-node id for adapter 1 on Host1 (a computer can have two adapters)", - CI_USED, - false, - CI_INT, - "0", - "0", - STR_VALUE(MAX_INT_RNIL) }, - - { - CFG_SCI_HOST2_ID_0, - "Host2SciId0", - "SCI", - "SCI-node id for adapter 0 on Host2 (a computer can have two adapters)", - CI_USED, - false, - CI_INT, - MANDATORY, - "0", - STR_VALUE(MAX_INT_RNIL) }, - - { - CFG_SCI_HOST2_ID_1, - "Host2SciId1", - "SCI", - "SCI-node id for adapter 1 on Host2 (a computer can have two adapters)", - CI_USED, - false, - CI_INT, - "0", - "0", - STR_VALUE(MAX_INT_RNIL) }, - - { - CFG_CONNECTION_SEND_SIGNAL_ID, - "SendSignalId", - "SCI", - "Sends id in each signal. Used in trace files.", - CI_USED, - false, - CI_BOOL, - "true", - "false", - "true" }, - - { - CFG_CONNECTION_CHECKSUM, - "Checksum", - "SCI", - "If checksum is enabled, all signals between nodes are checked for errors", - CI_USED, - false, - CI_BOOL, - "false", - "false", - "true" }, - - { - CFG_SCI_SEND_LIMIT, - "SendLimit", - "SCI", - "Transporter send buffer contents are sent when this no of bytes is buffered", - CI_USED, - false, - CI_INT, - "8K", - "128", - "32K" }, - - { - CFG_SCI_BUFFER_MEM, - "SharedBufferSize", - "SCI", - "Size of shared memory segment", - CI_USED, - false, - CI_INT, - "1M", - "64K", - STR_VALUE(MAX_INT_RNIL) }, - - { - CFG_CONNECTION_NODE_1_SYSTEM, - "NodeId1_System", - "SCI", - "System for node 1 in connection", - CI_INTERNAL, - false, - CI_STRING, - UNDEFINED, - 0, 0 }, - - { - CFG_CONNECTION_NODE_2_SYSTEM, - "NodeId2_System", - "SCI", - "System for node 2 in connection", - CI_INTERNAL, - false, - CI_STRING, - UNDEFINED, - 0, 0 }, - - /**************************************************************************** - * OSE - ***************************************************************************/ - { - CFG_SECTION_CONNECTION, - "OSE", - "OSE", - "Connection section", - CI_USED, - false, - CI_SECTION, - (const char *)CONNECTION_TYPE_OSE, - 0, 0 - }, - - { - CFG_CONNECTION_HOSTNAME_1, - "HostName1", - "OSE", - "Name of computer on one side of the connection", - CI_USED, - false, - CI_STRING, - UNDEFINED, - 0, 0 }, - - { - CFG_CONNECTION_HOSTNAME_2, - "HostName2", - "OSE", - "Name of computer on one side of the connection", - CI_USED, - false, - CI_STRING, - UNDEFINED, - 0, 0 }, - - { - CFG_CONNECTION_NODE_1, - "NodeId1", - "OSE", - "Id of node ("DB_TOKEN_PRINT", "API_TOKEN_PRINT" or "MGM_TOKEN_PRINT") on one side of the connection", - CI_USED, - false, - CI_INT, - MANDATORY, - "0", - STR_VALUE(MAX_INT_RNIL) }, - - { - CFG_CONNECTION_NODE_2, - "NodeId2", - "OSE", - "Id of node ("DB_TOKEN_PRINT", "API_TOKEN_PRINT" or "MGM_TOKEN_PRINT") on one side of the connection", - CI_USED, - false, - CI_INT, - UNDEFINED, - "0", - STR_VALUE(MAX_INT_RNIL) }, - - { - CFG_CONNECTION_SEND_SIGNAL_ID, - "SendSignalId", - "OSE", - "Sends id in each signal. 
Used in trace files.", - CI_USED, - false, - CI_BOOL, - "true", - "false", - "true" }, - - { - CFG_CONNECTION_CHECKSUM, - "Checksum", - "OSE", - "If checksum is enabled, all signals between nodes are checked for errors", - CI_USED, - false, - CI_BOOL, - "false", - "false", - "true" }, - - { - CFG_CONNECTION_NODE_1_SYSTEM, - "NodeId1_System", - "OSE", - "System for node 1 in connection", - CI_INTERNAL, - false, - CI_STRING, - UNDEFINED, - 0, 0 }, - - { - CFG_CONNECTION_NODE_2_SYSTEM, - "NodeId2_System", - "OSE", - "System for node 2 in connection", - CI_INTERNAL, - false, - CI_STRING, - UNDEFINED, - 0, 0 }, -}; - -const int ParamInfoNum = sizeof(ParamInfoArray) / sizeof(ParamInfo); diff --git a/storage/ndb/src/mgmsrv/ParamInfo.hpp b/storage/ndb/src/mgmsrv/ParamInfo.hpp deleted file mode 100644 index fa65fff461f..00000000000 --- a/storage/ndb/src/mgmsrv/ParamInfo.hpp +++ /dev/null @@ -1,59 +0,0 @@ -/* Copyright (C) 2006 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ - -#ifndef PARAMINFO_H -#define PARAMINFO_H - -#define DB_TOKEN "DB" -#define MGM_TOKEN "MGM" -#define API_TOKEN "API" - -#ifdef __cplusplus -extern "C" -{ -#endif - -/** - * The Configuration parameter type and status - */ - -enum ParameterType { CI_BOOL, CI_INT, CI_INT64, CI_STRING, CI_SECTION }; -enum ParameterStatus { CI_USED, ///< Active - CI_DEPRICATED, ///< Can be, but shouldn't - CI_NOTIMPLEMENTED, ///< Is ignored. 
- CI_INTERNAL ///< Not configurable by the user -}; - -/** - * Entry for one configuration parameter - */ -typedef struct m_ParamInfo { - Uint32 _paramId; - const char* _fname; - const char* _section; - const char* _description; - ParameterStatus _status; - bool _updateable; - ParameterType _type; - const char* _default; - const char* _min; - const char* _max; -}ParamInfo; - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/storage/ndb/src/mgmsrv/Services.cpp b/storage/ndb/src/mgmsrv/Services.cpp index d76ae4d6be5..9272b5ab532 100644 --- a/storage/ndb/src/mgmsrv/Services.cpp +++ b/storage/ndb/src/mgmsrv/Services.cpp @@ -18,7 +18,7 @@ #include <uucode.h> #include <socket_io.h> -#include <ndb_version.h> +#include <util/version.h> #include <mgmapi.h> #include <EventLogger.hpp> #include <signaldata/SetLogLevelOrd.hpp> @@ -288,18 +288,23 @@ struct PurgeStruct NDB_TICKS tick; }; +#define ERROR_INSERTED(x) (g_errorInsert == x || m_errorInsert == x) + +#define SLEEP_ERROR_INSERTED(x) if(ERROR_INSERTED(x)){NdbSleep_SecSleep(10);} + MgmApiSession::MgmApiSession(class MgmtSrvr & mgm, NDB_SOCKET_TYPE sock, Uint64 session_id) : SocketServer::Session(sock), m_mgmsrv(mgm) { DBUG_ENTER("MgmApiSession::MgmApiSession"); - m_input = new SocketInputStream(sock); - m_output = new SocketOutputStream(sock); + m_input = new SocketInputStream(sock, 30000); + m_output = new SocketOutputStream(sock, 30000); m_parser = new Parser_t(commands, *m_input, true, true, true); m_allocated_resources= new MgmtSrvr::Allocated_resources(m_mgmsrv); m_stopSelf= 0; m_ctx= NULL; m_session_id= session_id; m_mutex= NdbMutex_Create(); + m_errorInsert= 0; DBUG_VOID_RETURN; } @@ -339,6 +344,9 @@ MgmApiSession::runSession() while(!stop) { NdbMutex_Lock(m_mutex); + m_input->reset_timeout(); + m_output->reset_timeout(); + m_parser->run(ctx, *this); if(ctx.m_currentToken == 0) @@ -349,19 +357,6 @@ MgmApiSession::runSession() switch(ctx.m_status) { case Parser_t::UnknownCommand: -#ifdef MGM_GET_CONFIG_BACKWARDS_COMPAT - /* Backwards compatibility for old NDBs that still use - * the old "GET CONFIG" command. 
- */ - size_t i; - for(i=0; i<strlen(ctx.m_currentToken); i++) - ctx.m_currentToken[i] = toupper(ctx.m_currentToken[i]); - - if(strncmp("GET CONFIG ", - ctx.m_currentToken, - strlen("GET CONFIG ")) == 0) - getConfig_old(ctx); -#endif /* MGM_GET_CONFIG_BACKWARDS_COMPAT */ break; default: break; @@ -382,32 +377,6 @@ MgmApiSession::runSession() DBUG_VOID_RETURN; } -#ifdef MGM_GET_CONFIG_BACKWARDS_COMPAT -void -MgmApiSession::getConfig_old(Parser_t::Context &ctx) { - Properties args; - - Uint32 version, node; - - if(sscanf(ctx.m_currentToken, "GET CONFIG %d %d", - (int *)&version, (int *)&node) != 2) { - m_output->println("Expected 2 arguments for GET CONFIG"); - return; - } - - /* Put arguments in properties object so we can call the real function */ - args.put("version", version); - args.put("node", node); - getConfig_common(ctx, args, true); -} -#endif /* MGM_GET_CONFIG_BACKWARDS_COMPAT */ - -void -MgmApiSession::getConfig(Parser_t::Context &ctx, - const class Properties &args) { - getConfig_common(ctx, args); -} - static Properties * backward(const char * base, const Properties* reply){ Properties * ret = new Properties(); @@ -584,9 +553,9 @@ MgmApiSession::get_nodeid(Parser_t::Context &, } void -MgmApiSession::getConfig_common(Parser_t::Context &, - const class Properties &args, - bool compat) { +MgmApiSession::getConfig(Parser_t::Context &, + const class Properties &args) +{ Uint32 version, node = 0; args.get("version", &version); @@ -600,47 +569,6 @@ MgmApiSession::getConfig_common(Parser_t::Context &, return; } - if(version > 0 && version < makeVersion(3, 5, 0) && compat){ - Properties *reply = backward("", conf->m_oldConfig); - reply->put("Version", version); - reply->put("LocalNodeId", node); - - backward("", reply); - //reply->print(); - - const Uint32 size = reply->getPackedSize(); - Uint32 *buffer = new Uint32[size/4+1]; - - reply->pack(buffer); - delete reply; - - const int uurows = (size + 44)/45; - char * uubuf = new char[uurows * 62+5]; - - const int uusz = uuencode_mem(uubuf, (char *)buffer, size); - delete[] buffer; - - m_output->println("GET CONFIG %d %d %d %d %d", - 0, version, node, size, uusz); - - m_output->println("begin 664 Ndb_cfg.bin"); - - /* XXX Need to write directly to the socket, because the uubuf is not - * NUL-terminated. This could/should probably be done in a nicer way. 
- */ - write_socket(m_socket, MAX_WRITE_TIMEOUT, uubuf, uusz); - delete[] uubuf; - - m_output->println("end"); - m_output->println(""); - return; - } - - if(compat){ - m_output->println("GET CONFIG %d %d %d %d %d",1, version, 0, 0, 0); - return; - } - if(node != 0){ bool compatible; switch (m_mgmsrv.getNodeType(node)) { @@ -669,21 +597,30 @@ MgmApiSession::getConfig_common(Parser_t::Context &, NdbMutex_Lock(m_mgmsrv.m_configMutex); const ConfigValues * cfg = &conf->m_configValues->m_config; - const Uint32 size = cfg->getPackedSize(); UtilBuffer src; cfg->pack(src); NdbMutex_Unlock(m_mgmsrv.m_configMutex); char *tmp_str = (char *) malloc(base64_needed_encoded_length(src.length())); - int res = base64_encode(src.get_data(), src.length(), tmp_str); - + (void) base64_encode(src.get_data(), src.length(), tmp_str); + + SLEEP_ERROR_INSERTED(1); + m_output->println("get config reply"); m_output->println("result: Ok"); m_output->println("Content-Length: %d", strlen(tmp_str)); m_output->println("Content-Type: ndbconfig/octet-stream"); + SLEEP_ERROR_INSERTED(2); m_output->println("Content-Transfer-Encoding: base64"); m_output->println(""); + if(ERROR_INSERTED(3)) + { + int l= strlen(tmp_str); + tmp_str[l/2]='\0'; + m_output->println(tmp_str); + NdbSleep_SecSleep(10); + } m_output->println(tmp_str); free(tmp_str); @@ -694,11 +631,22 @@ void MgmApiSession::insertError(Parser<MgmApiSession>::Context &, Properties const &args) { Uint32 node = 0, error = 0; + int result= 0; args.get("node", &node); args.get("error", &error); - int result = m_mgmsrv.insertError(node, error); + if(node==m_mgmsrv.getOwnNodeId() + && error < MGM_ERROR_MAX_INJECT_SESSION_ONLY) + { + m_errorInsert= error; + if(error==0) + g_errorInsert= error; + } + else + { + result= m_mgmsrv.insertError(node, error); + } m_output->println("insert error reply"); if(result != 0) @@ -815,6 +763,7 @@ MgmApiSession::endSession(Parser<MgmApiSession>::Context &, m_allocated_resources= new MgmtSrvr::Allocated_resources(m_mgmsrv); + SLEEP_ERROR_INSERTED(4); m_output->println("end session reply"); } @@ -850,8 +799,6 @@ MgmApiSession::setClusterLogLevel(Parser<MgmApiSession>::Context &, const char *reply= "set cluster loglevel reply"; Uint32 node, level, cat; BaseString errorString; - SetLogLevelOrd logLevel; - int result; DBUG_ENTER("MgmApiSession::setClusterLogLevel"); args.get("node", &node); args.get("category", &cat); @@ -859,8 +806,7 @@ MgmApiSession::setClusterLogLevel(Parser<MgmApiSession>::Context &, DBUG_PRINT("enter",("node=%d, category=%d, level=%d", node, cat, level)); - /* XXX should use constants for this value */ - if(level > 15) { + if(level > NDB_MGM_MAX_LOGLEVEL) { m_output->println(reply); m_output->println("result: Invalid loglevel %d", level); m_output->println(""); @@ -898,14 +844,12 @@ MgmApiSession::setLogLevel(Parser<MgmApiSession>::Context &, Uint32 node = 0, level = 0, cat; BaseString errorString; SetLogLevelOrd logLevel; - int result; logLevel.clear(); args.get("node", &node); args.get("category", &cat); args.get("level", &level); - /* XXX should use constants for this value */ - if(level > 15) { + if(level > NDB_MGM_MAX_LOGLEVEL) { m_output->println("set loglevel reply"); m_output->println("result: Invalid loglevel", errorString.c_str()); m_output->println(""); @@ -1070,12 +1014,16 @@ MgmApiSession::getStatus(Parser<MgmApiSession>::Context &, while(m_mgmsrv.getNextNodeId(&nodeId, NDB_MGM_NODE_TYPE_MGM)){ noOfNodes++; } - + SLEEP_ERROR_INSERTED(5); m_output->println("node status"); + SLEEP_ERROR_INSERTED(6); 
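/*
 * The numbered SLEEP_ERROR_INSERTED() points in these replies stall this
 * session for 10 seconds at one specific protocol stage, so that the new
 * 30-second SocketInputStream/SocketOutputStream timeouts can be hit
 * mid-message by a test.  A point is armed through the session-local
 * branch of insertError() above by targeting the mgmd's own node id.
 * A minimal sketch, assuming the standard mgmapi calls, a connected
 * handle, and that error 5 falls below MGM_ERROR_MAX_INJECT_SESSION_ONLY
 * (all names illustrative, not part of this patch):
 *
 *   struct ndb_mgm_reply reply;
 *   ndb_mgm_insert_error(handle, ndb_mgm_get_mgmd_nodeid(handle), 5, &reply);
 *   // the next status request stalls ~10s before the "node status" line
 *   struct ndb_mgm_cluster_state *state= ndb_mgm_get_status(handle);
 */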
m_output->println("nodes: %d", noOfNodes); + SLEEP_ERROR_INSERTED(7); printNodeStatus(m_output, m_mgmsrv, NDB_MGM_NODE_TYPE_NDB); printNodeStatus(m_output, m_mgmsrv, NDB_MGM_NODE_TYPE_MGM); + SLEEP_ERROR_INSERTED(8); printNodeStatus(m_output, m_mgmsrv, NDB_MGM_NODE_TYPE_API); + SLEEP_ERROR_INSERTED(9); nodeId = 0; @@ -1190,8 +1138,10 @@ MgmApiSession::enterSingleUser(Parser<MgmApiSession>::Context &, Properties const &args) { int stopped = 0; Uint32 nodeId = 0; + int result= 0; args.get("nodeId", &nodeId); - int result = m_mgmsrv.enterSingleUser(&stopped, nodeId); + + result = m_mgmsrv.enterSingleUser(&stopped, nodeId); m_output->println("enter single user reply"); if(result != 0) { m_output->println("result: %s", get_error_text(result)); @@ -1327,6 +1277,8 @@ MgmApiSession::setLogFilter(Parser_t::Context &ctx, m_output->println(""); } +#ifdef NOT_USED + static NdbOut& operator<<(NdbOut& out, const LogLevel & ll) { @@ -1336,6 +1288,7 @@ operator<<(NdbOut& out, const LogLevel & ll) out << "]"; return out; } +#endif void Ndb_mgmd_event_service::log(int eventType, const Uint32* theData, NodeId nodeId){ @@ -1384,20 +1337,21 @@ Ndb_mgmd_event_service::log(int eventType, const Uint32* theData, NodeId nodeId) { if(threshold <= m_clients[i].m_logLevel.getLogLevel(cat)) { - NDB_SOCKET_TYPE fd= m_clients[i].m_socket; - if(fd != NDB_INVALID_SOCKET) + if(m_clients[i].m_socket==NDB_INVALID_SOCKET) + continue; + + SocketOutputStream out(m_clients[i].m_socket); + + int r; + if (m_clients[i].m_parsable) + r= out.println(str.c_str()); + else + r= out.println(m_text); + + if (r<0) { - int r; - if (m_clients[i].m_parsable) - r= println_socket(fd, - MAX_WRITE_TIMEOUT, str.c_str()); - else - r= println_socket(fd, - MAX_WRITE_TIMEOUT, m_text); - if (r == -1) { - copy.push_back(fd); - m_clients.erase(i, false); - } + copy.push_back(m_clients[i].m_socket); + m_clients.erase(i, false); } } } @@ -1448,14 +1402,16 @@ Ndb_mgmd_event_service::check_listeners() m_clients.lock(); for(i= m_clients.size() - 1; i >= 0; i--) { - int fd= m_clients[i].m_socket; - DBUG_PRINT("info",("%d %d",i,fd)); - char buf[1]; - buf[0]=0; - if (fd != NDB_INVALID_SOCKET && - println_socket(fd,MAX_WRITE_TIMEOUT,"<PING>") == -1) + if(m_clients[i].m_socket==NDB_INVALID_SOCKET) + continue; + + SocketOutputStream out(m_clients[i].m_socket); + + DBUG_PRINT("info",("%d %d",i,m_clients[i].m_socket)); + + if(out.println("<PING>") < 0) { - NDB_CLOSE_SOCKET(fd); + NDB_CLOSE_SOCKET(m_clients[i].m_socket); m_clients.erase(i, false); n=1; } @@ -1605,7 +1561,7 @@ MgmApiSession::listen_event(Parser<MgmApiSession>::Context & ctx, } int level = atoi(spec[1].c_str()); - if(level < 0 || level > 15){ + if(level < 0 || level > NDB_MGM_MAX_LOGLEVEL){ msg.appfmt("Invalid level: >%s<", spec[1].c_str()); result = -1; goto done; @@ -1685,8 +1641,11 @@ void MgmApiSession::check_connection(Parser_t::Context &ctx, const class Properties &args) { + SLEEP_ERROR_INSERTED(1); m_output->println("check connection reply"); + SLEEP_ERROR_INSERTED(2); m_output->println("result: Ok"); + SLEEP_ERROR_INSERTED(3); m_output->println(""); } @@ -1706,7 +1665,9 @@ MgmApiSession::get_mgmd_nodeid(Parser_t::Context &ctx, Properties const &args) { m_output->println("get mgmd nodeid reply"); - m_output->println("nodeid:%u",m_mgmsrv.getOwnNodeId()); + m_output->println("nodeid:%u",m_mgmsrv.getOwnNodeId()); + SLEEP_ERROR_INSERTED(1); + m_output->println(""); } diff --git a/storage/ndb/src/mgmsrv/Services.hpp b/storage/ndb/src/mgmsrv/Services.hpp index f6af16d58ba..76f839e3aab 100644 --- 
a/storage/ndb/src/mgmsrv/Services.hpp +++ b/storage/ndb/src/mgmsrv/Services.hpp @@ -24,9 +24,6 @@ #include "MgmtSrvr.hpp" -/** Undefine this to remove backwards compatibility for "GET CONFIG". */ -#define MGM_GET_CONFIG_BACKWARDS_COMPAT - class MgmApiSession : public SocketServer::Session { static void stop_session_if_timed_out(SocketServer::Session *_s, void *data); @@ -49,9 +46,8 @@ private: Parser_t::Context *m_ctx; Uint64 m_session_id; - void getConfig_common(Parser_t::Context &ctx, - const class Properties &args, - bool compat = false); + int m_errorInsert; + const char *get_error_text(int err_no) { return m_mgmsrv.getErrorText(err_no, m_err_str, sizeof(m_err_str)); } @@ -61,9 +57,6 @@ public: void runSession(); void getConfig(Parser_t::Context &ctx, const class Properties &args); -#ifdef MGM_GET_CONFIG_BACKWARDS_COMPAT - void getConfig_old(Parser_t::Context &ctx); -#endif /* MGM_GET_CONFIG_BACKWARDS_COMPAT */ void get_nodeid(Parser_t::Context &ctx, const class Properties &args); void getVersion(Parser_t::Context &ctx, const class Properties &args); diff --git a/storage/ndb/src/mgmsrv/main.cpp b/storage/ndb/src/mgmsrv/main.cpp index 1c52bf7c518..16c560868ef 100644 --- a/storage/ndb/src/mgmsrv/main.cpp +++ b/storage/ndb/src/mgmsrv/main.cpp @@ -131,8 +131,6 @@ bool g_StopServer; bool g_RestartServer; extern EventLogger g_eventLogger; -extern int global_mgmt_server_check; - enum ndb_mgmd_options { OPT_INTERACTIVE = NDB_STD_OPTIONS_LAST, OPT_NO_NODEID_CHECKS, @@ -144,29 +142,29 @@ static struct my_option my_long_options[] = { NDB_STD_OPTS("ndb_mgmd"), { "config-file", 'f', "Specify cluster configuration file", - (gptr*) &opt_config_filename, (gptr*) &opt_config_filename, 0, + (uchar**) &opt_config_filename, (uchar**) &opt_config_filename, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { "print-full-config", 'P', "Print full config and exit", - (gptr*) &g_print_full_config, (gptr*) &g_print_full_config, 0, + (uchar**) &g_print_full_config, (uchar**) &g_print_full_config, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "daemon", 'd', "Run ndb_mgmd in daemon mode (default)", - (gptr*) &opt_daemon, (gptr*) &opt_daemon, 0, + (uchar**) &opt_daemon, (uchar**) &opt_daemon, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0 }, { "interactive", OPT_INTERACTIVE, "Run interactive. 
Not supported but provided for testing purposes", - (gptr*) &opt_interactive, (gptr*) &opt_interactive, 0, + (uchar**) &opt_interactive, (uchar**) &opt_interactive, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "no-nodeid-checks", OPT_NO_NODEID_CHECKS, "Do not provide any node id checks", - (gptr*) &g_no_nodeid_checks, (gptr*) &g_no_nodeid_checks, 0, + (uchar**) &g_no_nodeid_checks, (uchar**) &g_no_nodeid_checks, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "nodaemon", OPT_NO_DAEMON, "Don't run as daemon, but don't read from stdin", - (gptr*) &opt_non_interactive, (gptr*) &opt_non_interactive, 0, + (uchar**) &opt_non_interactive, (uchar**) &opt_non_interactive, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "mycnf", 256, "Read cluster config from my.cnf", - (gptr*) &opt_mycnf, (gptr*) &opt_mycnf, 0, + (uchar**) &opt_mycnf, (uchar**) &opt_mycnf, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} }; @@ -190,7 +188,6 @@ static void usage() */ int main(int argc, char** argv) { - int mgm_connect_result; NDB_INIT(argv[0]); @@ -207,8 +204,6 @@ int main(int argc, char** argv) start: glob= new MgmGlobals; - global_mgmt_server_check = 1; - if (opt_interactive || opt_non_interactive || g_print_full_config) { diff --git a/storage/ndb/src/mgmsrv/ndb_mgmd_error.h b/storage/ndb/src/mgmsrv/ndb_mgmd_error.h new file mode 100644 index 00000000000..2438f15c808 --- /dev/null +++ b/storage/ndb/src/mgmsrv/ndb_mgmd_error.h @@ -0,0 +1,33 @@ +/* Copyright (C) 2007 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#ifndef NDB_MGMD_ERROR_H +#define NDB_MGMD_ERROR_H + +#define NO_CONTACT_WITH_PROCESS 5000 +#define WRONG_PROCESS_TYPE 5002 +#define SEND_OR_RECEIVE_FAILED 5005 +#define INVALID_ERROR_NUMBER 5007 +#define INVALID_TRACE_NUMBER 5008 +#define INVALID_BLOCK_NAME 5010 +#define NODE_SHUTDOWN_IN_PROGESS 5026 +#define SYSTEM_SHUTDOWN_IN_PROGRESS 5027 +#define NODE_SHUTDOWN_WOULD_CAUSE_SYSTEM_CRASH 5028 +#define NO_CONTACT_WITH_DB_NODES 5030 +#define UNSUPPORTED_NODE_SHUTDOWN 5031 +#define NODE_NOT_API_NODE 5062 +#define OPERATION_NOT_ALLOWED_START_STOP 5063 + +#endif diff --git a/storage/ndb/src/ndbapi/ClusterMgr.cpp b/storage/ndb/src/ndbapi/ClusterMgr.cpp index 5095c6c3856..448bc1025e8 100644 --- a/storage/ndb/src/ndbapi/ClusterMgr.cpp +++ b/storage/ndb/src/ndbapi/ClusterMgr.cpp @@ -16,7 +16,7 @@ #include <ndb_global.h> #include <my_pthread.h> #include <ndb_limits.h> -#include <ndb_version.h> +#include <util/version.h> #include "TransporterFacade.hpp" #include "ClusterMgr.hpp" @@ -61,6 +61,7 @@ ClusterMgr::ClusterMgr(TransporterFacade & _facade): clusterMgrThreadMutex = NdbMutex_Create(); waitForHBCond= NdbCondition_Create(); waitingForHB= false; + m_max_api_reg_req_interval= 0xFFFFFFFF; // MAX_INT noOfAliveNodes= 0; noOfConnectedNodes= 0; theClusterMgrThread= 0; @@ -243,7 +244,7 @@ ClusterMgr::threadMain( ){ } theFacade.lock_mutex(); - for (int i = 1; i < MAX_NODES; i++){ + for (int i = 1; i < MAX_NDB_NODES; i++){ /** * Send register request (heartbeat) to all available nodes * at specified timing intervals @@ -264,7 +265,8 @@ ClusterMgr::threadMain( ){ } theNode.hbCounter += timeSlept; - if (theNode.hbCounter >= theNode.hbFrequency) { + if (theNode.hbCounter >= m_max_api_reg_req_interval || + theNode.hbCounter >= theNode.hbFrequency) { /** * It is now time to send a new Heartbeat */ @@ -360,8 +362,6 @@ ClusterMgr::execAPI_REGREQ(const Uint32 * theData){ theFacade.sendSignalUnCond(&signal, nodeId); } -int global_mgmt_server_check = 0; // set to one in mgmtsrvr main; - void ClusterMgr::execAPI_REGCONF(const Uint32 * theData){ const ApiRegConf * const apiRegConf = (ApiRegConf *)&theData[0]; @@ -379,7 +379,7 @@ ClusterMgr::execAPI_REGCONF(const Uint32 * theData){ if(node.m_info.m_version != apiRegConf->version){ node.m_info.m_version = apiRegConf->version; - if (global_mgmt_server_check == 1) + if(theNodes[theFacade.ownId()].m_info.m_type == NodeInfo::MGM) node.compatible = ndbCompatible_mgmt_ndb(NDB_VERSION, node.m_info.m_version); else @@ -391,7 +391,7 @@ ClusterMgr::execAPI_REGCONF(const Uint32 * theData){ node.m_state = apiRegConf->nodeState; if (node.compatible && (node.m_state.startLevel == NodeState::SL_STARTED || - node.m_state.startLevel == NodeState::SL_SINGLEUSER)){ + node.m_state.getSingleUserMode())){ set_node_alive(node, true); } else { set_node_alive(node, false); diff --git a/storage/ndb/src/ndbapi/ClusterMgr.hpp b/storage/ndb/src/ndbapi/ClusterMgr.hpp index 6e74620dd4f..0a261eb202f 100644 --- a/storage/ndb/src/ndbapi/ClusterMgr.hpp +++ b/storage/ndb/src/ndbapi/ClusterMgr.hpp @@ -50,6 +50,7 @@ public: void startThread(); void forceHB(); + void set_max_api_reg_req_interval(unsigned int millisec) { m_max_api_reg_req_interval = millisec; } private: void threadMain(); @@ -89,6 +90,7 @@ public: Uint32 m_connect_count; private: + Uint32 m_max_api_reg_req_interval; Uint32 
noOfAliveNodes; Uint32 noOfConnectedNodes; Node theNodes[MAX_NODES]; diff --git a/storage/ndb/src/ndbapi/DictCache.cpp b/storage/ndb/src/ndbapi/DictCache.cpp index ed3bde38d4a..04be3711847 100644 --- a/storage/ndb/src/ndbapi/DictCache.cpp +++ b/storage/ndb/src/ndbapi/DictCache.cpp @@ -154,7 +154,7 @@ void GlobalDictCache::printCache() } NdbTableImpl * -GlobalDictCache::get(const char * name) +GlobalDictCache::get(const char * name, int *error) { DBUG_ENTER("GlobalDictCache::get"); DBUG_PRINT("enter", ("name: %s", name)); @@ -165,6 +165,11 @@ GlobalDictCache::get(const char * name) versions = m_tableHash.getData(name, len); if(versions == 0){ versions = new Vector<TableVersion>(2); + if (versions == NULL) + { + *error = -1; + DBUG_RETURN(0); + } m_tableHash.insertKey(name, len, 0, versions); } @@ -211,7 +216,11 @@ GlobalDictCache::get(const char * name) tmp.m_impl = 0; tmp.m_status = RETREIVING; tmp.m_refCount = 1; // The one retreiving it - versions->push_back(tmp); + if (versions->push_back(tmp)) + { + *error = -1; + DBUG_RETURN(0); + } DBUG_PRINT("info", ("No table found")); DBUG_RETURN(0); } diff --git a/storage/ndb/src/ndbapi/DictCache.hpp b/storage/ndb/src/ndbapi/DictCache.hpp index bab027de0c8..9250ec7b196 100644 --- a/storage/ndb/src/ndbapi/DictCache.hpp +++ b/storage/ndb/src/ndbapi/DictCache.hpp @@ -67,7 +67,7 @@ public: ~GlobalDictCache(); NdbTableImpl * get(NdbTableImpl *tab); - NdbTableImpl * get(const char * name); + NdbTableImpl * get(const char * name, int *error); NdbTableImpl* put(const char * name, NdbTableImpl *); void release(NdbTableImpl *, int invalidate = 0); diff --git a/storage/ndb/src/ndbapi/Makefile.am b/storage/ndb/src/ndbapi/Makefile.am index 90e61b5b188..8ff427772b0 100644 --- a/storage/ndb/src/ndbapi/Makefile.am +++ b/storage/ndb/src/ndbapi/Makefile.am @@ -15,6 +15,10 @@ #SUBDIRS = signal-sender +noinst_PROGRAMS = ndberror_check + +ndberror_check_SOURCES = ndberror_check.c + noinst_LTLIBRARIES = libndbapi.la libndbapi_la_SOURCES = \ @@ -51,7 +55,8 @@ libndbapi_la_SOURCES = \ ndb_cluster_connection.cpp \ NdbBlob.cpp \ NdbIndexStat.cpp \ - SignalSender.cpp + SignalSender.cpp \ + ObjectMap.cpp INCLUDES_LOC = -I$(top_srcdir)/storage/ndb/src/mgmapi @@ -61,6 +66,11 @@ NDB_CXXFLAGS_RELEASE_LOC = -O2 include $(top_srcdir)/storage/ndb/config/common.mk.am include $(top_srcdir)/storage/ndb/config/type_ndbapi.mk.am +ndberror_check_LDFLAGS = \ + $(top_builddir)/dbug/libdbug.a \ + $(top_builddir)/mysys/libmysys.a \ + $(top_builddir)/strings/libmystrings.a + # Don't update the files from bitkeeper %::SCCS/s.% diff --git a/storage/ndb/src/ndbapi/Ndb.cpp b/storage/ndb/src/ndbapi/Ndb.cpp index 0db20f723ea..15647861eef 100644 --- a/storage/ndb/src/ndbapi/Ndb.cpp +++ b/storage/ndb/src/ndbapi/Ndb.cpp @@ -37,6 +37,7 @@ Name: Ndb.cpp #include "API.hpp" #include <NdbEnv.h> #include <BaseString.hpp> +#include <NdbSqlUtil.hpp> /**************************************************************************** void connect(); @@ -58,6 +59,8 @@ NdbTransaction* Ndb::doConnect(Uint32 tConNode) // We have connections now to the desired node. Return //**************************************************************************** DBUG_RETURN(getConnectedNdbTransaction(tConNode)); + } else if (TretCode < 0) { + DBUG_RETURN(NULL); } else if (TretCode != 0) { tAnyAlive = 1; }//if @@ -81,6 +84,8 @@ NdbTransaction* Ndb::doConnect(Uint32 tConNode) // We have connections now to the desired node. 
Return //**************************************************************************** DBUG_RETURN(getConnectedNdbTransaction(tNode)); + } else if (TretCode < 0) { + DBUG_RETURN(NULL); } else if (TretCode != 0) { tAnyAlive= 1; }//if @@ -109,6 +114,8 @@ NdbTransaction* Ndb::doConnect(Uint32 tConNode) // We have connections now to the desired node. Return //**************************************************************************** DBUG_RETURN(getConnectedNdbTransaction(tNode)); + } else if (TretCode < 0) { + DBUG_RETURN(NULL); } else if (TretCode != 0) { tAnyAlive= 1; }//if @@ -195,6 +202,12 @@ Ndb::NDB_connect(Uint32 tNode) DBUG_PRINT("info", ("unsuccessful connect tReturnCode %d, tNdbCon->Status() %d", tReturnCode, tNdbCon->Status())); + if (theError.code == 299 || // single user mode + theError.code == 281 ) // cluster shutdown in progress + { + // no need to retry with other node + DBUG_RETURN(-1); + } DBUG_RETURN(3); }//if }//Ndb::NDB_connect() @@ -255,8 +268,6 @@ Ndb::waitUntilReady(int timeout) DBUG_ENTER("Ndb::waitUntilReady"); int secondsCounter = 0; int milliCounter = 0; - int noChecksSinceFirstAliveFound = 0; - int id; if (theInitState != Initialised) { // Ndb::init is not called @@ -295,6 +306,180 @@ Return Value: Returns a pointer to a connection object. Return NULL otherwise. Remark: Start transaction. Synchronous. *****************************************************************************/ +int +Ndb::computeHash(Uint32 *retval, + const NdbDictionary::Table *table, + const struct Key_part_ptr * keyData, + void* buf, Uint32 bufLen) +{ + Uint32 j = 0; + Uint32 sumlen = 0; // Needed len + const NdbTableImpl* impl = &NdbTableImpl::getImpl(*table); + const NdbColumnImpl* const * cols = impl->m_columns.getBase(); + Uint32 len; + char* pos; + + Uint32 colcnt = impl->m_columns.size(); + Uint32 parts = impl->m_noOfDistributionKeys; + if (parts == 0) + { + parts = impl->m_noOfKeys; + } + + for (Uint32 i = 0; i<parts; i++) + { + if (unlikely(keyData[i].ptr == 0)) + goto enullptr; + } + + if (unlikely(keyData[parts].ptr != 0)) + goto emissingnullptr; + + const NdbColumnImpl* partcols[NDB_MAX_NO_OF_ATTRIBUTES_IN_KEY]; + for (Uint32 i = 0; i<colcnt && j < parts; i++) + { + if (cols[i]->m_distributionKey) + { + // wl3717_todo + // char allowed now as dist key so this case should be tested + partcols[j++] = cols[i]; + } + } + + for (Uint32 i = 0; i<parts; i++) + { + Uint32 lb, len; + if (unlikely(!NdbSqlUtil::get_var_length(partcols[i]->m_type, + keyData[i].ptr, + keyData[i].len, + lb, len))) + goto emalformedkey; + + if (unlikely(keyData[i].len < (lb + len))) + goto elentosmall; + + Uint32 maxlen = (partcols[i]->m_attrSize * partcols[i]->m_arraySize); + + if (unlikely(lb == 0 && keyData[i].len != maxlen)) + goto emalformedkey; + + if (partcols[i]->m_cs) + { + Uint32 xmul = partcols[i]->m_cs->strxfrm_multiply; + xmul = xmul ? 
xmul : 1; + len = xmul * (maxlen - lb); + } + + len = (lb + len + 3) & ~(Uint32)3; + sumlen += len; + + } + + if (buf) + { + UintPtr org = UintPtr(buf); + UintPtr use = (org + 7) & ~(UintPtr)7; + + buf = (void*)use; + bufLen -= (use - org); + + if (unlikely(sumlen > bufLen)) + goto ebuftosmall; + } + else + { + buf = malloc(sumlen); + if (unlikely(buf == 0)) + goto enomem; + bufLen = 0; + assert((UintPtr(buf) & 7) == 0); + } + + pos = (char*)buf; + for (Uint32 i = 0; i<parts; i++) + { + Uint32 lb, len; + NdbSqlUtil::get_var_length(partcols[i]->m_type, + keyData[i].ptr, keyData[i].len, lb, len); + CHARSET_INFO* cs; + if ((cs = partcols[i]->m_cs)) + { + Uint32 xmul = cs->strxfrm_multiply; + if (xmul == 0) + xmul = 1; + /* + * Varchar end-spaces are ignored in comparisons. To get same hash + * we blank-pad to maximum length via strnxfrm. + */ + Uint32 maxlen = (partcols[i]->m_attrSize * partcols[i]->m_arraySize); + Uint32 dstLen = xmul * (maxlen - lb); + int n = NdbSqlUtil::strnxfrm_bug7284(cs, + (unsigned char*)pos, + dstLen, + ((unsigned char*)keyData[i].ptr)+lb, + len); + + if (unlikely(n == -1)) + goto emalformedstring; + + while ((n & 3) != 0) + { + pos[n++] = 0; + } + pos += n; + } + else + { + len += lb; + memcpy(pos, keyData[i].ptr, len); + while (len & 3) + { + * (pos + len++) = 0; + } + pos += len; + } + } + len = UintPtr(pos) - UintPtr(buf); + assert((len & 3) == 0); + + Uint32 values[4]; + md5_hash(values, (const Uint64*)buf, len >> 2); + + if (retval) + { + * retval = values[1]; + } + + if (bufLen == 0) + free(buf); + + return 0; + +enullptr: + return 4316; + +emissingnullptr: + return 4276; + +elentosmall: + return 4277; + +ebuftosmall: + return 4278; + +emalformedstring: + if (bufLen == 0) + free(buf); + + return 4279; + +emalformedkey: + return 4280; + +enomem: + return 4000; +} + NdbTransaction* Ndb::startTransaction(const NdbDictionary::Table *table, const char * keyData, Uint32 keyLen) @@ -427,7 +612,11 @@ Ndb::startTransactionLocal(Uint32 aPriority, Uint32 nodeId) theRemainingStartTransactions--; NdbTransaction* tConNext = theTransactionList; - tConnection->init(); + if (tConnection->init()) + { + theError.code = tConnection->theError.code; + DBUG_RETURN(NULL); + } theTransactionList = tConnection; // into a transaction list. tConnection->next(tConNext); // Add the active connection object tConnection->setTransactionId(tFirstTransId); @@ -741,17 +930,27 @@ Ndb::getNodeId() } /**************************************************************************** -Uint64 getTupleIdFromNdb( Uint32 aTableId, Uint32 cacheSize ); - -Parameters: aTableId : The TableId. - cacheSize: Prefetch this many values -Remark: Returns a new TupleId to the application. - The TupleId comes from SYSTAB_0 where SYSKEY_0 = TableId. - It is initialized to (TableId << 48) + 1 in NdbcntrMain.cpp. +Uint64 getAutoIncrementValue( const char* aTableName, + Uint64 & autoValue, + Uint32 cacheSize, + Uint64 step, + Uint64 start); + +Parameters: aTableName (IN) : The table name. + autoValue (OUT) : Returns new autoincrement value + cacheSize (IN) : Prefetch this many values + step (IN) : Specifies the step between the + autoincrement values. + start (IN) : Start value for first value +Remark: Returns a new autoincrement value to the application. + The autoincrement values can be increased by steps + (default 1) and a number of values can be prefetched + by specifying cacheSize (default 10). 
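Example:    A minimal sketch, assuming an initialised Ndb object "ndb"
            and a table "t1" (both illustrative, not part of this patch).
            With cacheSize 10, step 10 and start 5 the sequence handed
            out is 5,15,25,...:

              Uint64 id;
              if (ndb->getAutoIncrementValue("t1", id, 10, 10, 5) == -1)
              {
                // inspect ndb->getNdbError() and bail out
              }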
****************************************************************************/ int Ndb::getAutoIncrementValue(const char* aTableName, - Uint64 & tupleId, Uint32 cacheSize) + Uint64 & autoValue, Uint32 cacheSize, + Uint64 step, Uint64 start) { DBUG_ENTER("Ndb::getAutoIncrementValue"); ASSERT_NOT_MYSQLD; @@ -765,15 +964,16 @@ Ndb::getAutoIncrementValue(const char* aTableName, } const NdbTableImpl* table = info->m_table_impl; TupleIdRange & range = info->m_tuple_id_range; - if (getTupleIdFromNdb(table, range, tupleId, cacheSize) == -1) + if (getTupleIdFromNdb(table, range, autoValue, cacheSize, step, start) == -1) DBUG_RETURN(-1); - DBUG_PRINT("info", ("value %lu", (ulong) tupleId)); + DBUG_PRINT("info", ("value %lu", (ulong) autoValue)); DBUG_RETURN(0); } int Ndb::getAutoIncrementValue(const NdbDictionary::Table * aTable, - Uint64 & tupleId, Uint32 cacheSize) + Uint64 & autoValue, Uint32 cacheSize, + Uint64 step, Uint64 start) { DBUG_ENTER("Ndb::getAutoIncrementValue"); ASSERT_NOT_MYSQLD; @@ -788,51 +988,86 @@ Ndb::getAutoIncrementValue(const NdbDictionary::Table * aTable, DBUG_RETURN(-1); } TupleIdRange & range = info->m_tuple_id_range; - if (getTupleIdFromNdb(table, range, tupleId, cacheSize) == -1) + if (getTupleIdFromNdb(table, range, autoValue, cacheSize, step, start) == -1) DBUG_RETURN(-1); - DBUG_PRINT("info", ("value %lu", (ulong)tupleId)); + DBUG_PRINT("info", ("value %lu", (ulong)autoValue)); DBUG_RETURN(0); } int Ndb::getAutoIncrementValue(const NdbDictionary::Table * aTable, - TupleIdRange & range, Uint64 & tupleId, - Uint32 cacheSize) + TupleIdRange & range, Uint64 & autoValue, + Uint32 cacheSize, Uint64 step, Uint64 start) { DBUG_ENTER("Ndb::getAutoIncrementValue"); assert(aTable != 0); const NdbTableImpl* table = & NdbTableImpl::getImpl(*aTable); - if (getTupleIdFromNdb(table, range, tupleId, cacheSize) == -1) + if (getTupleIdFromNdb(table, range, autoValue, cacheSize, step, start) == -1) DBUG_RETURN(-1); - DBUG_PRINT("info", ("value %lu", (ulong)tupleId)); + DBUG_PRINT("info", ("value %lu", (ulong)autoValue)); DBUG_RETURN(0); } int Ndb::getTupleIdFromNdb(const NdbTableImpl* table, - TupleIdRange & range, Uint64 & tupleId, Uint32 cacheSize) + TupleIdRange & range, Uint64 & tupleId, + Uint32 cacheSize, Uint64 step, Uint64 start) { +/* + Returns a new TupleId to the application. + The TupleId comes from SYSTAB_0 where SYSKEY_0 = TableId. + It is initialized to (TableId << 48) + 1 in NdbcntrMain.cpp. + In most cases step= start= 1, in which case we get: + 1,2,3,4,5,... + If step=10 and start=5 and first number is 1, we get: + 5,15,25,35,... +*/ DBUG_ENTER("Ndb::getTupleIdFromNdb"); - if (range.m_first_tuple_id != range.m_last_tuple_id) + /* + Check if the next value can be taken from the pre-fetched + sequence. + */ + if (range.m_first_tuple_id != range.m_last_tuple_id && + range.m_first_tuple_id + step <= range.m_last_tuple_id) { assert(range.m_first_tuple_id < range.m_last_tuple_id); - tupleId = ++range.m_first_tuple_id; - DBUG_PRINT("info", ("next cached value %lu", (ulong)tupleId)); + range.m_first_tuple_id += step; + tupleId = range.m_first_tuple_id; + DBUG_PRINT("info", ("Next cached value %lu", (ulong) tupleId)); } else { + /* + If start value is greater than step it is ignored + */ + Uint64 offset = (start > step) ? 1 : start; + + /* + Pre-fetch a number of values depending on cacheSize + */ if (cacheSize == 0) cacheSize = 1; + DBUG_PRINT("info", ("reading %u values from database", (uint)cacheSize)); /* * reserve next cacheSize entries in db. 
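 * (Worked example of the mapping below, with illustrative numbers:
 *  step=10, start=5 gives offset=5; if the range reserved from NEXTID
 *  begins at opValue=21, then div=(21+10-5)/10=2, next=2*10+5=25,
 *  current=25-10=15, and since opValue > current the function returns
 *  next=25 -- the smallest value >= opValue congruent to start mod step.)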
adds cacheSize to NEXTID - * and returns first tupleId in the new range. + * and returns first tupleId in the new range. If tupleId's are + * incremented in steps then multiply the cacheSize with step size. */ - Uint64 opValue = cacheSize; + Uint64 opValue = cacheSize * step; + if (opTupleIdOnNdb(table, range, opValue, 0) == -1) DBUG_RETURN(-1); - tupleId = opValue; + DBUG_PRINT("info", ("Next value fetched from database %lu", (ulong) opValue)); + DBUG_PRINT("info", ("Increasing %lu by offset %lu, increment is %lu", (ulong) (ulong) opValue, (ulong) offset, (ulong) step)); + Uint64 current, next; + Uint64 div = ((Uint64) (opValue + step - offset)) / step; + next = div * step + offset; + current = (next < step) ? next : next - step; + tupleId = (opValue <= current) ? current : next; + DBUG_PRINT("info", ("Returning %lu", (ulong) tupleId)); + range.m_first_tuple_id = tupleId; } DBUG_RETURN(0); } @@ -1077,7 +1312,7 @@ Ndb::opTupleIdOnNdb(const NdbTableImpl* table, tOperation->incValue("NEXTID", opValue); tRecAttrResult = tOperation->getValue("NEXTID"); - if (tConnection->execute( Commit ) == -1 ) + if (tConnection->execute( NdbTransaction::Commit ) == -1 ) goto error_handler; tValue = tRecAttrResult->u_64_value(); @@ -1092,7 +1327,7 @@ Ndb::opTupleIdOnNdb(const NdbTableImpl* table, tOperation->equal("SYSKEY_0", aTableId ); tOperation->setValue("NEXTID", opValue); - if (tConnection->execute( Commit ) == -1 ) + if (tConnection->execute( NdbTransaction::Commit ) == -1 ) goto error_handler; range.reset(); @@ -1109,7 +1344,7 @@ Ndb::opTupleIdOnNdb(const NdbTableImpl* table, tOperation->def_label(0); tOperation->interpret_exit_nok(9999); - if (tConnection->execute( Commit ) == -1) + if (tConnection->execute( NdbTransaction::Commit ) == -1) { if (tConnection->theError.code != 9999) goto error_handler; @@ -1126,7 +1361,7 @@ Ndb::opTupleIdOnNdb(const NdbTableImpl* table, tOperation->readTuple(); tOperation->equal("SYSKEY_0", aTableId ); tRecAttrResult = tOperation->getValue("NEXTID"); - if (tConnection->execute( Commit ) == -1 ) + if (tConnection->execute( NdbTransaction::Commit ) == -1 ) goto error_handler; opValue = tRecAttrResult->u_64_value(); // out break; @@ -1191,13 +1426,18 @@ const char * Ndb::getCatalogName() const return theImpl->m_dbname.c_str(); } -void Ndb::setCatalogName(const char * a_catalog_name) +int Ndb::setCatalogName(const char * a_catalog_name) { // TODO can table_name_separator be escaped? if (a_catalog_name && ! strchr(a_catalog_name, table_name_separator)) { - theImpl->m_dbname.assign(a_catalog_name); - theImpl->update_prefix(); + if (!theImpl->m_dbname.assign(a_catalog_name) || + theImpl->update_prefix()) + { + theError.code = 4000; + return -1; + } } + return 0; } const char * Ndb::getSchemaName() const @@ -1205,13 +1445,18 @@ const char * Ndb::getSchemaName() const return theImpl->m_schemaname.c_str(); } -void Ndb::setSchemaName(const char * a_schema_name) +int Ndb::setSchemaName(const char * a_schema_name) { // TODO can table_name_separator be escaped? if (a_schema_name && ! 
strchr(a_schema_name, table_name_separator)) { - theImpl->m_schemaname.assign(a_schema_name); - theImpl->update_prefix(); + if (!theImpl->m_schemaname.assign(a_schema_name) || + theImpl->update_prefix()) + { + theError.code = 4000; + return -1; + } } + return 0; } // </internal> @@ -1220,9 +1465,9 @@ const char * Ndb::getDatabaseName() const return getCatalogName(); } -void Ndb::setDatabaseName(const char * a_catalog_name) +int Ndb::setDatabaseName(const char * a_catalog_name) { - setCatalogName(a_catalog_name); + return setCatalogName(a_catalog_name); } const char * Ndb::getDatabaseSchemaName() const @@ -1230,9 +1475,9 @@ const char * Ndb::getDatabaseSchemaName() const return getSchemaName(); } -void Ndb::setDatabaseSchemaName(const char * a_schema_name) +int Ndb::setDatabaseSchemaName(const char * a_schema_name) { - setSchemaName(a_schema_name); + return setSchemaName(a_schema_name); } int Ndb::setDatabaseAndSchemaName(const NdbDictionary::Table* t) @@ -1402,6 +1647,11 @@ const BaseString Ndb::getDatabaseFromInternalName(const char * internalName) { char * databaseName = new char[strlen(internalName) + 1]; + if (databaseName == NULL) + { + errno = ENOMEM; + return BaseString(NULL); + } strcpy(databaseName, internalName); register char *ptr = databaseName; @@ -1418,6 +1668,11 @@ const BaseString Ndb::getSchemaFromInternalName(const char * internalName) { char * schemaName = new char[strlen(internalName)]; + if (schemaName == NULL) + { + errno = ENOMEM; + return BaseString(NULL); + } register const char *ptr1 = internalName; /* Scan name for the second table_name_separator */ diff --git a/storage/ndb/src/ndbapi/NdbBlob.cpp b/storage/ndb/src/ndbapi/NdbBlob.cpp index 9ebc5fa9a81..f3d1dbe3dd1 100644 --- a/storage/ndb/src/ndbapi/NdbBlob.cpp +++ b/storage/ndb/src/ndbapi/NdbBlob.cpp @@ -536,7 +536,7 @@ int NdbBlob::setTableKeyValue(NdbOperation* anOp) { DBUG_ENTER("NdbBlob::setTableKeyValue"); - DBUG_DUMP("info", theKeyBuf.data, 4 * theTable->m_keyLenInWords); + DBUG_DUMP("info", (uchar*) theKeyBuf.data, 4 * theTable->m_keyLenInWords); const Uint32* data = (const Uint32*)theKeyBuf.data; const unsigned columns = theTable->m_columns.size(); unsigned pos = 0; @@ -562,7 +562,8 @@ int NdbBlob::setAccessKeyValue(NdbOperation* anOp) { DBUG_ENTER("NdbBlob::setAccessKeyValue"); - DBUG_DUMP("info", theAccessKeyBuf.data, 4 * theAccessTable->m_keyLenInWords); + DBUG_DUMP("info", (uchar*) theAccessKeyBuf.data, + 4 * theAccessTable->m_keyLenInWords); const Uint32* data = (const Uint32*)theAccessKeyBuf.data; const unsigned columns = theAccessTable->m_columns.size(); unsigned pos = 0; @@ -587,7 +588,7 @@ NdbBlob::setPartKeyValue(NdbOperation* anOp, Uint32 part) { DBUG_ENTER("NdbBlob::setPartKeyValue"); DBUG_PRINT("info", ("dist=%u part=%u packkey=", getDistKey(part), part)); - DBUG_DUMP("info", thePackKeyBuf.data, 4 * thePackKeyBuf.size); + DBUG_DUMP("info", (uchar*) thePackKeyBuf.data, 4 * thePackKeyBuf.size); // TODO use attr ids after compatibility with 4.1.7 not needed if (anOp->equal("PK", thePackKeyBuf.data) == -1 || anOp->equal("DIST", getDistKey(part)) == -1 || @@ -607,6 +608,12 @@ NdbBlob::getHeadInlineValue(NdbOperation* anOp) setErrorCode(anOp); DBUG_RETURN(-1); } + /* + * If we get no data from this op then the operation is aborted + * one way or other. Following hack in 5.0 makes sure we don't read + * garbage. The proper fix exists only in version >= 5.1. 
+ */ + theHead->length = 0; DBUG_RETURN(0); } @@ -1031,7 +1038,9 @@ NdbBlob::writeDataPrivate(const char* buf, Uint32 bytes) DBUG_RETURN(-1); Uint32 n = thePartSize - off; if (n > len) { - memset(thePartBuf.data + off + len, theFillChar, n - len); + /* If we are adding data at the end, fill rest of part. */ + if (pos + len >= theLength) + memset(thePartBuf.data + off + len, theFillChar, n - len); n = len; } memcpy(thePartBuf.data + off, buf, n); @@ -1127,13 +1136,18 @@ NdbBlob::readTableParts(char* buf, Uint32 part, Uint32 count) while (n < count) { NdbOperation* tOp = theNdbCon->getNdbOperation(theBlobTable); if (tOp == NULL || - tOp->committedRead() == -1 || + /* + * This was committedRead() before. However lock on main + * table tuple does not fully protect blob parts since DBTUP + * commits each tuple separately. + */ + tOp->readTuple(NdbOperation::LM_SimpleRead) == -1 || setPartKeyValue(tOp, part + n) == -1 || tOp->getValue((Uint32)3, buf) == NULL) { setErrorCode(tOp); DBUG_RETURN(-1); } - tOp->m_abortOption = NdbTransaction::AbortOnError; + tOp->m_abortOption = NdbOperation::AbortOnError; buf += thePartSize; n++; thePendingBlobOps |= (1 << NdbOperation::ReadRequest); @@ -1169,7 +1183,7 @@ NdbBlob::insertParts(const char* buf, Uint32 part, Uint32 count) setErrorCode(tOp); DBUG_RETURN(-1); } - tOp->m_abortOption = NdbTransaction::AbortOnError; + tOp->m_abortOption = NdbOperation::AbortOnError; buf += thePartSize; n++; thePendingBlobOps |= (1 << NdbOperation::InsertRequest); @@ -1193,7 +1207,7 @@ NdbBlob::updateParts(const char* buf, Uint32 part, Uint32 count) setErrorCode(tOp); DBUG_RETURN(-1); } - tOp->m_abortOption = NdbTransaction::AbortOnError; + tOp->m_abortOption = NdbOperation::AbortOnError; buf += thePartSize; n++; thePendingBlobOps |= (1 << NdbOperation::UpdateRequest); @@ -1216,7 +1230,7 @@ NdbBlob::deleteParts(Uint32 part, Uint32 count) setErrorCode(tOp); DBUG_RETURN(-1); } - tOp->m_abortOption = NdbTransaction::AbortOnError; + tOp->m_abortOption = NdbOperation::AbortOnError; n++; thePendingBlobOps |= (1 << NdbOperation::DeleteRequest); theNdbCon->thePendingBlobOps |= (1 << NdbOperation::DeleteRequest); @@ -1252,7 +1266,8 @@ NdbBlob::deletePartsUnknown(Uint32 part) setErrorCode(tOp); DBUG_RETURN(-1); } - tOp->m_abortOption= NdbTransaction::AO_IgnoreError; + tOp->m_abortOption= NdbOperation::AO_IgnoreError; + tOp->m_noErrorPropagation = true; n++; } DBUG_PRINT("info", ("bat=%u", bat)); @@ -1382,7 +1397,7 @@ NdbBlob::atPrepare(NdbTransaction* aCon, NdbOperation* anOp, const NdbColumnImpl if (isReadOp()) { // upgrade lock mode if (theNdbOp->theLockMode == NdbOperation::LM_CommittedRead) - theNdbOp->theLockMode = NdbOperation::LM_Read; + theNdbOp->setReadLockMode(NdbOperation::LM_Read); // add read of head+inline in this op if (getHeadInlineValue(theNdbOp) == -1) DBUG_RETURN(-1); @@ -1403,7 +1418,7 @@ NdbBlob::atPrepare(NdbTransaction* aCon, NdbOperation* anOp, const NdbColumnImpl if (isScanOp()) { // upgrade lock mode if (theNdbOp->theLockMode == NdbOperation::LM_CommittedRead) - theNdbOp->theLockMode = NdbOperation::LM_Read; + theNdbOp->setReadLockMode(NdbOperation::LM_Read); // add read of head+inline in this op if (getHeadInlineValue(theNdbOp) == -1) DBUG_RETURN(-1); @@ -1588,7 +1603,8 @@ NdbBlob::preExecute(NdbTransaction::ExecType anExecType, bool& batch) DBUG_RETURN(-1); } if (isWriteOp()) { - tOp->m_abortOption = NdbTransaction::AO_IgnoreError; + tOp->m_abortOption = NdbOperation::AO_IgnoreError; + tOp->m_noErrorPropagation = true; } theHeadInlineReadOp = tOp; // execute 
immediately @@ -1634,7 +1650,8 @@ NdbBlob::preExecute(NdbTransaction::ExecType anExecType, bool& batch) DBUG_RETURN(-1); } if (isWriteOp()) { - tOp->m_abortOption = NdbTransaction::AO_IgnoreError; + tOp->m_abortOption = NdbOperation::AO_IgnoreError; + tOp->m_noErrorPropagation = true; } theHeadInlineReadOp = tOp; // execute immediately @@ -1807,7 +1824,7 @@ NdbBlob::postExecute(NdbTransaction::ExecType anExecType) setErrorCode(NdbBlobImpl::ErrAbort); DBUG_RETURN(-1); } - tOp->m_abortOption = NdbTransaction::AbortOnError; + tOp->m_abortOption = NdbOperation::AbortOnError; DBUG_PRINT("info", ("added op to update head+inline")); } DBUG_RETURN(0); @@ -1837,7 +1854,7 @@ NdbBlob::preCommit() setErrorCode(NdbBlobImpl::ErrAbort); DBUG_RETURN(-1); } - tOp->m_abortOption = NdbTransaction::AbortOnError; + tOp->m_abortOption = NdbOperation::AbortOnError; DBUG_PRINT("info", ("added op to update head+inline")); } } diff --git a/storage/ndb/src/ndbapi/NdbDictionary.cpp b/storage/ndb/src/ndbapi/NdbDictionary.cpp index 6d548126126..a4395fc4b9c 100644 --- a/storage/ndb/src/ndbapi/NdbDictionary.cpp +++ b/storage/ndb/src/ndbapi/NdbDictionary.cpp @@ -78,9 +78,9 @@ NdbDictionary::Column::operator=(const NdbDictionary::Column& column) return *this; } -void +int NdbDictionary::Column::setName(const char * name){ - m_impl.m_name.assign(name); + return !m_impl.m_name.assign(name); } const char* @@ -234,10 +234,10 @@ NdbDictionary::Column::setAutoIncrementInitialValue(Uint64 val){ m_impl.m_autoIncrementInitialValue = val; } -void +int NdbDictionary::Column::setDefaultValue(const char* defaultValue) { - m_impl.m_defaultValue.assign(defaultValue); + return !m_impl.m_defaultValue.assign(defaultValue); } const char* @@ -327,9 +327,9 @@ NdbDictionary::Table::operator=(const NdbDictionary::Table& table) return *this; } -void +int NdbDictionary::Table::setName(const char * name){ - m_impl.setName(name); + return m_impl.setName(name); } const char * @@ -347,12 +347,24 @@ NdbDictionary::Table::getTableId() const { return m_impl.m_id; } -void +int NdbDictionary::Table::addColumn(const Column & c){ NdbColumnImpl* col = new NdbColumnImpl; + if (col == NULL) + { + errno = ENOMEM; + return -1; + } (* col) = NdbColumnImpl::getImpl(c); - m_impl.m_columns.push_back(col); - m_impl.buildColumnHash(); + if (m_impl.m_columns.push_back(col)) + { + return -1; + } + if (m_impl.buildColumnHash()) + { + return -1; + } + return 0; } const NdbDictionary::Column* @@ -495,10 +507,22 @@ NdbDictionary::Table::getFrmLength() const { return m_impl.getFrmLength(); } +enum NdbDictionary::Table::SingleUserMode +NdbDictionary::Table::getSingleUserMode() const +{ + return (enum SingleUserMode)m_impl.m_single_user_mode; +} + void +NdbDictionary::Table::setSingleUserMode(enum NdbDictionary::Table::SingleUserMode mode) +{ + m_impl.m_single_user_mode = (Uint8)mode; +} + +int NdbDictionary::Table::setTablespaceNames(const void *data, Uint32 len) { - m_impl.setTablespaceNames(data, len); + return m_impl.setTablespaceNames(data, len); } const void* @@ -537,9 +561,9 @@ NdbDictionary::Table::getFragmentCount() const return m_impl.getFragmentCount(); } -void +int NdbDictionary::Table::setFrm(const void* data, Uint32 len){ - m_impl.setFrm(data, len); + return m_impl.setFrm(data, len); } const void* @@ -552,10 +576,10 @@ NdbDictionary::Table::getFragmentDataLen() const { return m_impl.getFragmentDataLen(); } -void +int NdbDictionary::Table::setFragmentData(const void* data, Uint32 len) { - m_impl.setFragmentData(data, len); + return m_impl.setFragmentData(data, 
len); } const void* @@ -568,10 +592,10 @@ NdbDictionary::Table::getTablespaceDataLen() const { return m_impl.getTablespaceDataLen(); } -void +int NdbDictionary::Table::setTablespaceData(const void* data, Uint32 len) { - m_impl.setTablespaceData(data, len); + return m_impl.setTablespaceData(data, len); } const void* @@ -584,10 +608,10 @@ NdbDictionary::Table::getRangeListDataLen() const { return m_impl.getRangeListDataLen(); } -void +int NdbDictionary::Table::setRangeListData(const void* data, Uint32 len) { - m_impl.setRangeListData(data, len); + return m_impl.setRangeListData(data, len); } NdbDictionary::Object::Status @@ -669,18 +693,18 @@ NdbDictionary::Table::getTablespaceName() const return m_impl.m_tablespace_name.c_str(); } -void +int NdbDictionary::Table::setTablespaceName(const char * name){ m_impl.m_tablespace_id = ~0; m_impl.m_tablespace_version = ~0; - m_impl.m_tablespace_name.assign(name); + return !m_impl.m_tablespace_name.assign(name); } -void +int NdbDictionary::Table::setTablespace(const NdbDictionary::Tablespace & ts){ m_impl.m_tablespace_id = NdbTablespaceImpl::getImpl(ts).m_id; m_impl.m_tablespace_version = ts.getObjectVersion(); - m_impl.m_tablespace_name.assign(ts.getName()); + return !m_impl.m_tablespace_name.assign(ts.getName()); } void @@ -729,6 +753,7 @@ NdbDictionary::Table::validate(NdbError& error) /***************************************************************** * Index facade */ + NdbDictionary::Index::Index(const char * name) : m_impl(* new NdbIndexImpl(* this)) { @@ -747,9 +772,9 @@ NdbDictionary::Index::~Index(){ } } -void +int NdbDictionary::Index::setName(const char * name){ - m_impl.setName(name); + return m_impl.setName(name); } const char * @@ -757,9 +782,9 @@ NdbDictionary::Index::getName() const { return m_impl.getName(); } -void +int NdbDictionary::Index::setTable(const char * table){ - m_impl.setTable(table); + return m_impl.setTable(table); } const char * @@ -794,39 +819,56 @@ NdbDictionary::Index::getIndexColumn(int no) const { return NULL; } -void +int NdbDictionary::Index::addColumn(const Column & c){ NdbColumnImpl* col = new NdbColumnImpl; + if (col == NULL) + { + errno = ENOMEM; + return -1; + } (* col) = NdbColumnImpl::getImpl(c); - m_impl.m_columns.push_back(col); + if (m_impl.m_columns.push_back(col)) + { + return -1; + } + return 0; } -void +int NdbDictionary::Index::addColumnName(const char * name){ const Column c(name); - addColumn(c); + return addColumn(c); } -void +int NdbDictionary::Index::addIndexColumn(const char * name){ const Column c(name); - addColumn(c); + return addColumn(c); } -void +int NdbDictionary::Index::addColumnNames(unsigned noOfNames, const char ** names){ for(unsigned i = 0; i < noOfNames; i++) { const Column c(names[i]); - addColumn(c); + if (addColumn(c)) + { + return -1; + } } + return 0; } -void +int NdbDictionary::Index::addIndexColumns(int noOfNames, const char ** names){ for(int i = 0; i < noOfNames; i++) { const Column c(names[i]); - addColumn(c); + if (addColumn(c)) + { + return -1; + } } + return 0; } void @@ -904,10 +946,10 @@ NdbDictionary::Event::~Event() } } -void +int NdbDictionary::Event::setName(const char * name) { - m_impl.setName(name); + return m_impl.setName(name); } const char * @@ -928,10 +970,10 @@ NdbDictionary::Event::getTable() const return m_impl.getTable(); } -void +int NdbDictionary::Event::setTable(const char * table) { - m_impl.setTable(table); + return m_impl.setTable(table); } const char* @@ -1263,18 +1305,18 @@ NdbDictionary::Datafile::getFree() const { return m_impl.m_free; } 
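/*
 * The void -> int conversions running through this facade (setName,
 * addColumn, setFrm, setTablespace and friends) exist so that allocation
 * failures inside BaseString::assign() / Vector::push_back() reach the
 * caller instead of being silently dropped.  A minimal caller-side
 * sketch (table and column names illustrative, not part of this patch;
 * non-zero now means failure):
 *
 *   NdbDictionary::Table tab;
 *   NdbDictionary::Column col("c1");
 *   col.setType(NdbDictionary::Column::Unsigned);
 *   if (tab.setName("t1") != 0 || tab.addColumn(col) != 0)
 *   {
 *     // e.g. ENOMEM while copying the name or column; abort the DDL
 *   }
 */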
-void +int NdbDictionary::Datafile::setTablespace(const char * tablespace){ m_impl.m_filegroup_id = ~0; m_impl.m_filegroup_version = ~0; - m_impl.m_filegroup_name.assign(tablespace); + return !m_impl.m_filegroup_name.assign(tablespace); } -void +int NdbDictionary::Datafile::setTablespace(const NdbDictionary::Tablespace & ts){ m_impl.m_filegroup_id = NdbTablespaceImpl::getImpl(ts).m_id; m_impl.m_filegroup_version = ts.getObjectVersion(); - m_impl.m_filegroup_name.assign(ts.getName()); + return !m_impl.m_filegroup_name.assign(ts.getName()); } const char * diff --git a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp index c9b25189b6e..ab6d90ad59e 100644 --- a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp +++ b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp @@ -42,7 +42,7 @@ #include <my_sys.h> #include <NdbEnv.h> #include <NdbMem.h> -#include <ndb_version.h> +#include <util/version.h> #define DEBUG_PRINT 0 #define INCOMPATIBLE_VERSION -2 @@ -51,7 +51,7 @@ #define ERR_RETURN(a,b) \ {\ - DBUG_PRINT("exit", ("error %d", (a).code));\ + DBUG_PRINT("exit", ("error %d return %d", (a).code, b));\ DBUG_RETURN(b);\ } @@ -378,6 +378,11 @@ NdbColumnImpl::create_pseudo(const char * name){ col->m_impl.m_attrSize = 8; col->m_impl.m_arraySize = 1; col->m_impl.m_nullable = true; + } else if(!strcmp(name, "NDB$ANY_VALUE")){ + col->setType(NdbDictionary::Column::Unsigned); + col->m_impl.m_attrId = AttributeHeader::ANY_VALUE; + col->m_impl.m_attrSize = 4; + col->m_impl.m_arraySize = 1; } else if(!strcmp(name, "NDB$COPY_ROWID")){ col->setType(NdbDictionary::Column::Bigunsigned); col->m_impl.m_attrId = AttributeHeader::COPY_ROWID; @@ -476,6 +481,7 @@ NdbTableImpl::init(){ m_tablespace_name.clear(); m_tablespace_id = ~0; m_tablespace_version = ~0; + m_single_user_mode = 0; } bool @@ -669,18 +675,30 @@ NdbTableImpl::equal(const NdbTableImpl& obj) const DBUG_RETURN(false); } } + + if(m_single_user_mode != obj.m_single_user_mode) + { + DBUG_PRINT("info",("m_single_user_mode %d != %d", + (int32)m_single_user_mode, + (int32)obj.m_single_user_mode)); + DBUG_RETURN(false); + } + DBUG_RETURN(true); } -void +int NdbTableImpl::assign(const NdbTableImpl& org) { DBUG_ENTER("NdbColumnImpl::assign"); DBUG_PRINT("info", ("this: %p &org: %p", this, &org)); /* m_changeMask intentionally not copied */ m_primaryTableId = org.m_primaryTableId; - m_internalName.assign(org.m_internalName); - updateMysqlName(); + if (!m_internalName.assign(org.m_internalName) || + updateMysqlName()) + { + return -1; + } // If the name has been explicitly set, use that name // otherwise use the fetched name if (!org.m_newExternalName.empty()) @@ -712,9 +730,18 @@ NdbTableImpl::assign(const NdbTableImpl& org) for(i = 0; i < org.m_columns.size(); i++) { NdbColumnImpl * col = new NdbColumnImpl(); + if (col == NULL) + { + errno = ENOMEM; + return -1; + } const NdbColumnImpl * iorg = org.m_columns[i]; (* col) = (* iorg); - m_columns.push_back(col); + if (m_columns.push_back(col)) + { + delete col; + return -1; + } } m_fragments = org.m_fragments; @@ -733,6 +760,8 @@ NdbTableImpl::assign(const NdbTableImpl& org) m_keyLenInWords = org.m_keyLenInWords; m_fragmentCount = org.m_fragmentCount; + m_single_user_mode = org.m_single_user_mode; + if (m_index != 0) delete m_index; m_index = org.m_index; @@ -755,12 +784,12 @@ NdbTableImpl::assign(const NdbTableImpl& org) m_tablespace_name = org.m_tablespace_name; m_tablespace_id= org.m_tablespace_id; m_tablespace_version = org.m_tablespace_version; - DBUG_VOID_RETURN; + 
DBUG_RETURN(0); } -void NdbTableImpl::setName(const char * name) +int NdbTableImpl::setName(const char * name) { - m_newExternalName.assign(name); + return !m_newExternalName.assign(name); } const char * @@ -859,9 +888,9 @@ NdbTableImpl::getTablespaceNamesLen() const return m_new_ts_name.length(); } -void NdbTableImpl::setTablespaceNames(const void *data, Uint32 len) +int NdbTableImpl::setTablespaceNames(const void *data, Uint32 len) { - m_new_ts_name.assign(data, len); + return !m_new_ts_name.assign(data, len); } void NdbTableImpl::setFragmentCount(Uint32 count) @@ -874,9 +903,9 @@ Uint32 NdbTableImpl::getFragmentCount() const return m_fragmentCount; } -void NdbTableImpl::setFrm(const void* data, Uint32 len) +int NdbTableImpl::setFrm(const void* data, Uint32 len) { - m_newFrm.assign(data, len); + return m_newFrm.assign(data, len); } const void * @@ -897,9 +926,9 @@ NdbTableImpl::getFrmLength() const return m_newFrm.length(); } -void NdbTableImpl::setFragmentData(const void* data, Uint32 len) +int NdbTableImpl::setFragmentData(const void* data, Uint32 len) { - m_new_fd.assign(data, len); + return m_new_fd.assign(data, len); } const void * @@ -920,9 +949,9 @@ NdbTableImpl::getFragmentDataLen() const return m_new_fd.length(); } -void NdbTableImpl::setTablespaceData(const void* data, Uint32 len) +int NdbTableImpl::setTablespaceData(const void* data, Uint32 len) { - m_new_ts.assign(data, len); + return !m_new_ts.assign(data, len); } const void * @@ -943,9 +972,9 @@ NdbTableImpl::getTablespaceDataLen() const return m_new_ts.length(); } -void NdbTableImpl::setRangeListData(const void* data, Uint32 len) +int NdbTableImpl::setRangeListData(const void* data, Uint32 len) { - m_new_range.assign(data, len); + return m_new_range.assign(data, len); } const void * @@ -966,19 +995,18 @@ NdbTableImpl::getRangeListDataLen() const return m_new_range.length(); } -void +int NdbTableImpl::updateMysqlName() { Vector<BaseString> v; if (m_internalName.split(v,"/") == 3) { - m_mysqlName.assfmt("%s/%s",v[0].c_str(),v[2].c_str()); - return; + return !m_mysqlName.assfmt("%s/%s",v[0].c_str(),v[2].c_str()); } - m_mysqlName.assign(""); + return !m_mysqlName.assign(""); } -void +int NdbTableImpl::buildColumnHash(){ const Uint32 size = m_columns.size(); int i; @@ -990,19 +1018,29 @@ NdbTableImpl::buildColumnHash(){ } Vector<Uint32> hashValues; - Vector<Vector<Uint32> > chains; chains.fill(size, hashValues); + Vector<Vector<Uint32> > chains; + if (chains.fill(size, hashValues)) + { + return -1; + } for(i = 0; i< (int) size; i++){ Uint32 hv = Hash(m_columns[i]->getName()) & 0xFFFE; Uint32 bucket = hv & m_columnHashMask; bucket = (bucket < size ? 
bucket : bucket - size); assert(bucket < size); - hashValues.push_back(hv); - chains[bucket].push_back(i); + if (hashValues.push_back(hv) || + chains[bucket].push_back(i)) + { + return -1; + } } m_columnHash.clear(); Uint32 tmp = 1; - m_columnHash.fill((unsigned)size-1, tmp); // Default no chaining + if (m_columnHash.fill((unsigned)size-1, tmp)) // Default no chaining + { + return -1; + } Uint32 pos = 0; // In overflow vector for(i = 0; i< (int) size; i++){ @@ -1022,12 +1060,18 @@ NdbTableImpl::buildColumnHash(){ for(size_t j = 0; j<sz; j++, pos++){ Uint32 col = chains[i][j]; Uint32 hv = hashValues[col]; - m_columnHash.push_back((col << 16) | hv); + if (m_columnHash.push_back((col << 16) | hv)) + { + return -1; + } } } } - m_columnHash.push_back(0); // Overflow when looping in end of array + if (m_columnHash.push_back(0)) // Overflow when looping in end of array + { + return -1; + } #if 0 for(size_t i = 0; i<m_columnHash.size(); i++){ @@ -1042,6 +1086,7 @@ NdbTableImpl::buildColumnHash(){ i, col > 0 ? m_columns[col]->getName() : "" , m_columnHash[i]); } #endif + return 0; } Uint32 @@ -1160,9 +1205,9 @@ NdbIndexImpl::~NdbIndexImpl(){ delete m_columns[i]; } -void NdbIndexImpl::setName(const char * name) +int NdbIndexImpl::setName(const char * name) { - m_externalName.assign(name); + return !m_externalName.assign(name); } const char * @@ -1171,10 +1216,10 @@ NdbIndexImpl::getName() const return m_externalName.c_str(); } -void +int NdbIndexImpl::setTable(const char * table) { - m_tableName.assign(table); + return !m_tableName.assign(table); } const char * @@ -1235,9 +1280,9 @@ NdbEventImpl::~NdbEventImpl() DBUG_VOID_RETURN; } -void NdbEventImpl::setName(const char * name) +int NdbEventImpl::setName(const char * name) { - m_name.assign(name); + return !m_name.assign(name); } const char *NdbEventImpl::getName() const @@ -1245,11 +1290,11 @@ const char *NdbEventImpl::getName() const return m_name.c_str(); } -void +int NdbEventImpl::setTable(const NdbDictionary::Table& table) { setTable(&NdbTableImpl::getImpl(table)); - m_tableName.assign(m_tableImpl->getName()); + return !m_tableName.assign(m_tableImpl->getName()); } void @@ -1274,10 +1319,10 @@ NdbEventImpl::getTable() const return NULL; } -void +int NdbEventImpl::setTable(const char * table) { - m_tableName.assign(table); + return !m_tableName.assign(table); } const char * @@ -1397,14 +1442,18 @@ NdbDictionaryImpl::fetchGlobalTableImplRef(const GlobalCacheInitObject &obj) { DBUG_ENTER("fetchGlobalTableImplRef"); NdbTableImpl *impl; + int error= 0; m_globalHash->lock(); - impl = m_globalHash->get(obj.m_name.c_str()); + impl = m_globalHash->get(obj.m_name.c_str(), &error); m_globalHash->unlock(); if (impl == 0){ - impl = m_receiver.getTable(obj.m_name.c_str(), - m_ndb.usingFullyQualifiedNames()); + if (error == 0) + impl = m_receiver.getTable(obj.m_name.c_str(), + m_ndb.usingFullyQualifiedNames()); + else + m_error.code = 4000; if (impl != 0 && obj.init(*impl)) { delete impl; @@ -1424,10 +1473,11 @@ NdbDictionaryImpl::putTable(NdbTableImpl *impl) NdbTableImpl *old; int ret = getBlobTables(*impl); + int error = 0; assert(ret == 0); m_globalHash->lock(); - if ((old= m_globalHash->get(impl->m_internalName.c_str()))) + if ((old= m_globalHash->get(impl->m_internalName.c_str(), &error))) { m_globalHash->alter_table_rep(old->m_internalName.c_str(), impl->m_id, @@ -1867,12 +1917,20 @@ NdbDictInterface::getTable(const BaseString& name, bool fullyQualifiedNames) // Copy name to m_buffer to get a word sized buffer m_buffer.clear(); - 
m_buffer.grow(namelen_words*4+4); - m_buffer.append(name.c_str(), namelen); + if (m_buffer.grow(namelen_words*4+4) || + m_buffer.append(name.c_str(), namelen)) + { + m_error.code= 4000; + return NULL; + } #ifndef IGNORE_VALGRIND_WARNINGS Uint32 pad = 0; - m_buffer.append(&pad, 4); + if (m_buffer.append(&pad, 4)) + { + m_error.code= 4000; + return NULL; + } #endif LinearSectionPtr ptr[1]; @@ -1903,7 +1961,14 @@ NdbDictInterface::getTable(class NdbApiSignal * signal, m_buffer.length() / 4, fullyQualifiedNames); if(rt) - rt->buildColumnHash(); + { + if (rt->buildColumnHash()) + { + m_error.code = 4000; + delete rt; + return NULL; + } + } return rt; } @@ -1913,18 +1978,25 @@ NdbDictInterface::execGET_TABINFO_CONF(NdbApiSignal * signal, LinearSectionPtr ptr[3]) { const GetTabInfoConf* conf = CAST_CONSTPTR(GetTabInfoConf, signal->getDataPtr()); + const Uint32 i = GetTabInfoConf::DICT_TAB_INFO; if(signal->isFirstFragment()){ m_fragmentId = signal->getFragmentId(); - m_buffer.grow(4 * conf->totalLen); + if (m_buffer.grow(4 * conf->totalLen)) + { + m_error.code= 4000; + goto end; + } } else { if(m_fragmentId != signal->getFragmentId()){ abort(); } } - const Uint32 i = GetTabInfoConf::DICT_TAB_INFO; - m_buffer.append(ptr[i].p, 4 * ptr[i].sz); - + if (m_buffer.append(ptr[i].p, 4 * ptr[i].sz)) + { + m_error.code= 4000; + } +end: if(!signal->isLastFragment()){ return; } @@ -2071,13 +2143,15 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret, impl->m_id = tableDesc->TableId; impl->m_version = tableDesc->TableVersion; impl->m_status = NdbDictionary::Object::Retrieved; - impl->m_internalName.assign(internalName); - impl->updateMysqlName(); - impl->m_externalName.assign(externalName); - - impl->m_frm.assign(tableDesc->FrmData, tableDesc->FrmLen); - impl->m_fd.assign(tableDesc->FragmentData, tableDesc->FragmentDataLen); - impl->m_range.assign(tableDesc->RangeListData, tableDesc->RangeListDataLen); + if (!impl->m_internalName.assign(internalName) || + impl->updateMysqlName() || + !impl->m_externalName.assign(externalName) || + impl->m_frm.assign(tableDesc->FrmData, tableDesc->FrmLen) || + impl->m_fd.assign(tableDesc->FragmentData, tableDesc->FragmentDataLen) || + impl->m_range.assign(tableDesc->RangeListData, tableDesc->RangeListDataLen)) + { + DBUG_RETURN(4000); + } impl->m_fragmentCount = tableDesc->FragmentCount; /* @@ -2111,6 +2185,7 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret, impl->m_kvalue = tableDesc->TableKValue; impl->m_minLoadFactor = tableDesc->MinLoadFactor; impl->m_maxLoadFactor = tableDesc->MaxLoadFactor; + impl->m_single_user_mode = tableDesc->SingleUserMode; impl->m_indexType = (NdbDictionary::Object::Type) getApiConstant(tableDesc->TableType, @@ -2121,7 +2196,10 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret, } else { const char * externalPrimary = Ndb::externalizeTableName(tableDesc->PrimaryTable, fullyQualifiedNames); - impl->m_primaryTable.assign(externalPrimary); + if (!impl->m_primaryTable.assign(externalPrimary)) + { + DBUG_RETURN(4000); + } } Uint32 i; @@ -2144,6 +2222,7 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret, // check type and compute attribute size and array size if (! 
attrDesc.translateExtType()) { + delete col; delete impl; NdbMem_Free((void*)tableDesc); DBUG_RETURN(703); @@ -2156,6 +2235,7 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret, unsigned cs_number = (attrDesc.AttributeExtPrecision >> 16); // charset is defined exactly for char types if (col->getCharType() != (cs_number != 0)) { + delete col; delete impl; NdbMem_Free((void*)tableDesc); DBUG_RETURN(703); @@ -2163,6 +2243,7 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret, if (col->getCharType()) { col->m_cs = get_charset(cs_number, MYF(0)); if (col->m_cs == NULL) { + delete col; delete impl; NdbMem_Free((void*)tableDesc); DBUG_RETURN(743); @@ -2183,7 +2264,12 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret, col->m_nullable = attrDesc.AttributeNullableFlag; col->m_autoIncrement = (attrDesc.AttributeAutoIncrement != 0); col->m_autoIncrementInitialValue = ~0; - col->m_defaultValue.assign(attrDesc.AttributeDefaultValue); + if (!col->m_defaultValue.assign(attrDesc.AttributeDefaultValue)) + { + delete col; + delete impl; + DBUG_RETURN(4000); + } col->m_column_no = impl->m_columns.size(); impl->m_columns.push_back(col); @@ -2206,7 +2292,11 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret, pos++; // skip logpart for (Uint32 j = 0; j<(Uint32)replicaCount; j++) { - impl->m_fragments.push_back(ntohs(tableDesc->ReplicaData[pos++])); + if (impl->m_fragments.push_back(ntohs(tableDesc->ReplicaData[pos++]))) + { + delete impl; + DBUG_RETURN(4000); + } } } @@ -2252,8 +2342,13 @@ NdbDictionaryImpl::createTable(NdbTableImpl &t) // if the new name has not been set, use the copied name if (t.m_newExternalName.empty()) - t.m_newExternalName.assign(t.m_externalName); - + { + if (!t.m_newExternalName.assign(t.m_externalName)) + { + m_error.code= 4000; + DBUG_RETURN(-1); + } + } // create table if (m_receiver.createTable(m_ndb, t) != 0) DBUG_RETURN(-1); @@ -2428,7 +2523,11 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb, { AlterTableReq::setNameFlag(impl.m_changeMask, true); } - impl.m_externalName.assign(impl.m_newExternalName); + if (!impl.m_externalName.assign(impl.m_newExternalName)) + { + m_error.code= 4000; + DBUG_RETURN(-1); + } impl.m_newExternalName.clear(); } // Definition change (frm) @@ -2438,7 +2537,11 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb, { AlterTableReq::setFrmFlag(impl.m_changeMask, true); } - impl.m_frm.assign(impl.m_newFrm.get_data(), impl.m_newFrm.length()); + if (impl.m_frm.assign(impl.m_newFrm.get_data(), impl.m_newFrm.length())) + { + m_error.code= 4000; + DBUG_RETURN(-1); + } impl.m_newFrm.clear(); } // Change FragmentData (fragment identity, state, tablespace id) @@ -2448,7 +2551,11 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb, { AlterTableReq::setFragDataFlag(impl.m_changeMask, true); } - impl.m_fd.assign(impl.m_new_fd.get_data(), impl.m_new_fd.length()); + if (impl.m_fd.assign(impl.m_new_fd.get_data(), impl.m_new_fd.length())) + { + m_error.code= 4000; + DBUG_RETURN(-1); + } impl.m_new_fd.clear(); } // Change Tablespace Name Data @@ -2458,8 +2565,12 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb, { AlterTableReq::setTsNameFlag(impl.m_changeMask, true); } - impl.m_ts_name.assign(impl.m_new_ts_name.get_data(), - impl.m_new_ts_name.length()); + if (impl.m_ts_name.assign(impl.m_new_ts_name.get_data(), + impl.m_new_ts_name.length())) + { + m_error.code= 4000; + DBUG_RETURN(-1); + } impl.m_new_ts_name.clear(); } // Change Range/List Data @@ -2469,8 +2580,12 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb, { 
AlterTableReq::setRangeListFlag(impl.m_changeMask, true); } - impl.m_range.assign(impl.m_new_range.get_data(), - impl.m_new_range.length()); + if (impl.m_range.assign(impl.m_new_range.get_data(), + impl.m_new_range.length())) + { + m_error.code= 4000; + DBUG_RETURN(-1); + } impl.m_new_range.clear(); } // Change Tablespace Data @@ -2480,8 +2595,12 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb, { AlterTableReq::setTsFlag(impl.m_changeMask, true); } - impl.m_ts.assign(impl.m_new_ts.get_data(), - impl.m_new_ts.length()); + if (impl.m_ts.assign(impl.m_new_ts.get_data(), + impl.m_new_ts.length())) + { + m_error.code= 4000; + DBUG_RETURN(-1); + } impl.m_new_ts.clear(); } @@ -2496,7 +2615,11 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb, const BaseString internalName( ndb.internalize_table_name(impl.m_externalName.c_str())); - impl.m_internalName.assign(internalName); + if (!impl.m_internalName.assign(internalName)) + { + m_error.code= 4000; + DBUG_RETURN(-1); + } impl.updateMysqlName(); DictTabInfo::Table *tmpTab; @@ -2572,6 +2695,7 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb, tmpTab->MinRowsLow = (Uint32)(impl.m_min_rows & 0xFFFFFFFF); tmpTab->DefaultNoPartFlag = impl.m_default_no_part_flag; tmpTab->LinearHashFlag = impl.m_linear_flag; + tmpTab->SingleUserMode = impl.m_single_user_mode; tmpTab->ForceVarPartFlag = impl.m_force_var_part; if (impl.m_ts_name.length()) @@ -2915,7 +3039,6 @@ int NdbDictionaryImpl::dropTableGlobal(NdbTableImpl & impl) { int res; - const char * name = impl.getName(); DBUG_ENTER("NdbDictionaryImpl::dropTableGlobal"); DBUG_ASSERT(impl.m_status != NdbDictionary::Object::New); DBUG_ASSERT(impl.m_indexType == NdbDictionary::Object::TypeUndefined); @@ -3070,11 +3193,21 @@ NdbDictInterface::create_index_obj_from_table(NdbIndexImpl** dst, { DBUG_ENTER("NdbDictInterface::create_index_obj_from_table"); NdbIndexImpl *idx = new NdbIndexImpl(); + if (idx == NULL) + { + errno = ENOMEM; + return -1; + } idx->m_version = tab->m_version; idx->m_status = tab->m_status; idx->m_id = tab->m_id; - idx->m_externalName.assign(tab->getName()); - idx->m_tableName.assign(prim->m_externalName); + if (!idx->m_externalName.assign(tab->getName()) || + !idx->m_tableName.assign(prim->m_externalName)) + { + delete idx; + errno = ENOMEM; + return -1; + } NdbDictionary::Object::Type type = idx->m_type = tab->m_indexType; idx->m_logging = tab->m_logging; idx->m_temporary = tab->m_temporary; @@ -3088,9 +3221,20 @@ NdbDictInterface::create_index_obj_from_table(NdbIndexImpl** dst, NdbColumnImpl* org = tab->m_columns[i]; NdbColumnImpl* col = new NdbColumnImpl; + if (col == NULL) + { + errno = ENOMEM; + delete idx; + return -1; + } // Copy column definition *col = * org; - idx->m_columns.push_back(col); + if (idx->m_columns.push_back(col)) + { + delete col; + delete idx; + return -1; + } /** * reverse map @@ -4159,34 +4303,72 @@ NdbDictInterface::listObjects(NdbDictionary::Dictionary::List& list, BaseString databaseName; BaseString schemaName; BaseString objectName; + if (!databaseName || !schemaName || !objectName) + { + m_error.code= 4000; + return -1; + } if ((element.type == NdbDictionary::Object::UniqueHashIndex) || (element.type == NdbDictionary::Object::OrderedIndex)) { char * indexName = new char[n << 2]; + if (indexName == NULL) + { + m_error.code= 4000; + return -1; + } memcpy(indexName, &data[pos], n << 2); - databaseName = Ndb::getDatabaseFromInternalName(indexName); - schemaName = Ndb::getSchemaFromInternalName(indexName); + if (!(databaseName = 
Ndb::getDatabaseFromInternalName(indexName)) || + !(schemaName = Ndb::getSchemaFromInternalName(indexName))) + { + delete [] indexName; + m_error.code= 4000; + return -1; + } objectName = BaseString(Ndb::externalizeIndexName(indexName, fullyQualifiedNames)); delete [] indexName; } else if ((element.type == NdbDictionary::Object::SystemTable) || (element.type == NdbDictionary::Object::UserTable)) { char * tableName = new char[n << 2]; + if (tableName == NULL) + { + m_error.code= 4000; + return -1; + } memcpy(tableName, &data[pos], n << 2); - databaseName = Ndb::getDatabaseFromInternalName(tableName); - schemaName = Ndb::getSchemaFromInternalName(tableName); + if (!(databaseName = Ndb::getDatabaseFromInternalName(tableName)) || + !(schemaName = Ndb::getSchemaFromInternalName(tableName))) + { + delete [] tableName; + m_error.code= 4000; + return -1; + } objectName = BaseString(Ndb::externalizeTableName(tableName, fullyQualifiedNames)); delete [] tableName; } else { char * otherName = new char[n << 2]; + if (otherName == NULL) + { + m_error.code= 4000; + return -1; + } memcpy(otherName, &data[pos], n << 2); - objectName = BaseString(otherName); + if (!(objectName = BaseString(otherName))) + { + m_error.code= 4000; + return -1; + } delete [] otherName; } - element.database = new char[databaseName.length() + 1]; + if (!(element.database = new char[databaseName.length() + 1]) || + !(element.schema = new char[schemaName.length() + 1]) || + !(element.name = new char[objectName.length() + 1])) + { + m_error.code= 4000; + return -1; + } strcpy(element.database, databaseName.c_str()); - element.schema = new char[schemaName.length() + 1]; strcpy(element.schema, schemaName.c_str()); - element.name = new char[objectName.length() + 1]; strcpy(element.name, objectName.c_str()); pos += n; count++; @@ -4235,7 +4417,10 @@ NdbDictInterface::execLIST_TABLES_CONF(NdbApiSignal* signal, { const unsigned off = ListTablesConf::HeaderLength; const unsigned len = (signal->getLength() - off); - m_buffer.append(signal->getDataPtr() + off, len << 2); + if (m_buffer.append(signal->getDataPtr() + off, len << 2)) + { + m_error.code= 4000; + } if (signal->getLength() < ListTablesConf::SignalLength) { // last signal has less than full length m_waiter.signal(NO_WAIT); @@ -4288,8 +4473,6 @@ void NdbDictInterface::execWAIT_GCP_CONF(NdbApiSignal* signal, LinearSectionPtr ptr[3]) { - const WaitGCPConf * const conf= - CAST_CONSTPTR(WaitGCPConf, signal->getDataPtr()); m_waiter.signal(NO_WAIT); } @@ -4324,7 +4507,7 @@ NdbTablespaceImpl::NdbTablespaceImpl(NdbDictionary::Tablespace & f) : NdbTablespaceImpl::~NdbTablespaceImpl(){ } -void +int NdbTablespaceImpl::assign(const NdbTablespaceImpl& org) { m_id = org.m_id; @@ -4332,14 +4515,17 @@ NdbTablespaceImpl::assign(const NdbTablespaceImpl& org) m_status = org.m_status; m_type = org.m_type; - m_name.assign(org.m_name); + if (!m_name.assign(org.m_name)) + return -1; m_grow_spec = org.m_grow_spec; m_extent_size = org.m_extent_size; m_undo_free_words = org.m_undo_free_words; m_logfile_group_id = org.m_logfile_group_id; m_logfile_group_version = org.m_logfile_group_version; - m_logfile_group_name.assign(org.m_logfile_group_name); + if (!m_logfile_group_name.assign(org.m_logfile_group_name)) + return -1; m_undo_free_words = org.m_undo_free_words; + return 0; } NdbLogfileGroupImpl::NdbLogfileGroupImpl() : @@ -4357,7 +4543,7 @@ NdbLogfileGroupImpl::NdbLogfileGroupImpl(NdbDictionary::LogfileGroup & f) : NdbLogfileGroupImpl::~NdbLogfileGroupImpl(){ } -void +int NdbLogfileGroupImpl::assign(const 
NdbLogfileGroupImpl& org) { m_id = org.m_id; @@ -4365,14 +4551,17 @@ NdbLogfileGroupImpl::assign(const NdbLogfileGroupImpl& org) m_status = org.m_status; m_type = org.m_type; - m_name.assign(org.m_name); + if (!m_name.assign(org.m_name)) + return -1; m_grow_spec = org.m_grow_spec; m_extent_size = org.m_extent_size; m_undo_free_words = org.m_undo_free_words; m_logfile_group_id = org.m_logfile_group_id; m_logfile_group_version = org.m_logfile_group_version; - m_logfile_group_name.assign(org.m_logfile_group_name); + if (!m_logfile_group_name.assign(org.m_logfile_group_name)) + return -1; m_undo_free_words = org.m_undo_free_words; + return 0; } NdbFileImpl::NdbFileImpl(NdbDictionary::Object::Type t) @@ -4399,7 +4588,7 @@ NdbDatafileImpl::NdbDatafileImpl(NdbDictionary::Datafile & f) : NdbDatafileImpl::~NdbDatafileImpl(){ } -void +int NdbDatafileImpl::assign(const NdbDatafileImpl& org) { m_id = org.m_id; @@ -4411,8 +4600,10 @@ NdbDatafileImpl::assign(const NdbDatafileImpl& org) m_free = org.m_free; m_filegroup_id = org.m_filegroup_id; m_filegroup_version = org.m_filegroup_version; - m_path.assign(org.m_path); - m_filegroup_name.assign(org.m_filegroup_name); + if (!m_path.assign(org.m_path) || + !m_filegroup_name.assign(org.m_filegroup_name)) + return -1; + return 0; } NdbUndofileImpl::NdbUndofileImpl() : @@ -4430,7 +4621,7 @@ NdbUndofileImpl::NdbUndofileImpl(NdbDictionary::Undofile & f) : NdbUndofileImpl::~NdbUndofileImpl(){ } -void +int NdbUndofileImpl::assign(const NdbUndofileImpl& org) { m_id = org.m_id; @@ -4442,8 +4633,10 @@ NdbUndofileImpl::assign(const NdbUndofileImpl& org) m_free = org.m_free; m_filegroup_id = org.m_filegroup_id; m_filegroup_version = org.m_filegroup_version; - m_path.assign(org.m_path); - m_filegroup_name.assign(org.m_filegroup_name); + if (!m_path.assign(org.m_path) || + !m_filegroup_name.assign(org.m_filegroup_name)) + return 4000; + return 0; } int @@ -4875,7 +5068,8 @@ NdbDictInterface::get_filegroup(NdbFilegroupImpl & dst, get_filegroup(NdbLogfileGroupImpl::getImpl(tmp), NdbDictionary::Object::LogfileGroup, dst.m_logfile_group_id); - dst.m_logfile_group_name.assign(tmp.getName()); + if (!dst.m_logfile_group_name.assign(tmp.getName())) + DBUG_RETURN(m_error.code = 4000); } if(dst.m_type == type) @@ -4909,7 +5103,8 @@ NdbDictInterface::parseFilegroupInfo(NdbFilegroupImpl &dst, dst.m_type = (NdbDictionary::Object::Type)fg.FilegroupType; dst.m_status = NdbDictionary::Object::Retrieved; - dst.m_name.assign(fg.FilegroupName); + if (!dst.m_name.assign(fg.FilegroupName)) + return 4000; dst.m_extent_size = fg.TS_ExtentSize; dst.m_undo_buffer_size = fg.LF_UndoBufferSize; dst.m_logfile_group_id = fg.TS_LogfileGroupId; @@ -5028,7 +5223,8 @@ NdbDictInterface::get_file(NdbFileImpl & dst, get_filegroup(NdbLogfileGroupImpl::getImpl(tmp), NdbDictionary::Object::LogfileGroup, dst.m_filegroup_id); - dst.m_filegroup_name.assign(tmp.getName()); + if (!dst.m_filegroup_name.assign(tmp.getName())) + DBUG_RETURN(m_error.code = 4000); } else if(dst.m_type == NdbDictionary::Object::Datafile) { @@ -5036,7 +5232,8 @@ NdbDictInterface::get_file(NdbFileImpl & dst, get_filegroup(NdbTablespaceImpl::getImpl(tmp), NdbDictionary::Object::Tablespace, dst.m_filegroup_id); - dst.m_filegroup_name.assign(tmp.getName()); + if (!dst.m_filegroup_name.assign(tmp.getName())) + DBUG_RETURN(m_error.code = 4000); dst.m_free *= tmp.getExtentSize(); } else @@ -5072,7 +5269,8 @@ NdbDictInterface::parseFileInfo(NdbFileImpl &dst, dst.m_version = f.FileVersion; dst.m_size= ((Uint64)f.FileSizeHi << 32) | (f.FileSizeLo); 
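The assign() and parse*Info() hunks above all follow one convention in this changeset: helpers that previously returned void now return non-zero when an internal BaseString::assign() runs out of memory, and callers map that failure to error code 4000, the allocation-failure code this patch sets after every failed allocation. A minimal sketch of the resulting calling pattern, assuming assign() returns 0 on success; copy_undofile() and its NdbError plumbing are illustrative assumptions, not code from the patch:

  int copy_undofile(NdbUndofileImpl &dst, const NdbUndofileImpl &src,
                    NdbError &error)
  {
    if (dst.assign(src))   /* non-zero: an internal string copy failed (OOM) */
    {
      error.code = 4000;   /* allocation-failure code used throughout the patch */
      return -1;
    }
    return 0;
  }
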
- dst.m_path.assign(f.FileName); + if (!dst.m_path.assign(f.FileName)) + return 4000; dst.m_filegroup_id= f.FilegroupId; dst.m_filegroup_version= f.FilegroupVersion; @@ -5098,4 +5296,5 @@ const NdbDictionary::Column * NdbDictionary::Column::DISK_REF = 0; const NdbDictionary::Column * NdbDictionary::Column::RECORDS_IN_RANGE = 0; const NdbDictionary::Column * NdbDictionary::Column::ROWID = 0; const NdbDictionary::Column * NdbDictionary::Column::ROW_GCI = 0; +const NdbDictionary::Column * NdbDictionary::Column::ANY_VALUE = 0; const NdbDictionary::Column * NdbDictionary::Column::COPY_ROWID = 0; diff --git a/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp b/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp index f2b3173beff..aa9bd174471 100644 --- a/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp +++ b/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp @@ -131,28 +131,28 @@ public: ~NdbTableImpl(); void init(); - void setName(const char * name); + int setName(const char * name); const char * getName() const; void setFragmentCount(Uint32 count); Uint32 getFragmentCount() const; - void setFrm(const void* data, Uint32 len); + int setFrm(const void* data, Uint32 len); const void * getFrmData() const; Uint32 getFrmLength() const; - void setFragmentData(const void* data, Uint32 len); + int setFragmentData(const void* data, Uint32 len); const void * getFragmentData() const; Uint32 getFragmentDataLen() const; - void setTablespaceNames(const void* data, Uint32 len); + int setTablespaceNames(const void* data, Uint32 len); Uint32 getTablespaceNamesLen() const; const void * getTablespaceNames() const; - void setTablespaceData(const void* data, Uint32 len); + int setTablespaceData(const void* data, Uint32 len); const void * getTablespaceData() const; Uint32 getTablespaceDataLen() const; - void setRangeListData(const void* data, Uint32 len); + int setRangeListData(const void* data, Uint32 len); const void * getRangeListData() const; Uint32 getRangeListDataLen() const; const char * getMysqlName() const; - void updateMysqlName(); + int updateMysqlName(); int aggregate(NdbError& error); int validate(NdbError& error); @@ -182,7 +182,7 @@ public: Vector<Uint32> m_columnHash; Vector<NdbColumnImpl *> m_columns; void computeAggregates(); - void buildColumnHash(); + int buildColumnHash(); /** * Fragment info @@ -205,6 +205,7 @@ public: int m_maxLoadFactor; Uint16 m_keyLenInWords; Uint16 m_fragmentCount; + Uint8 m_single_user_mode; NdbIndexImpl * m_index; NdbColumnImpl * getColumn(unsigned attrId); @@ -232,7 +233,7 @@ public: * Equality/assign */ bool equal(const NdbTableImpl&) const; - void assign(const NdbTableImpl&); + int assign(const NdbTableImpl&); static NdbTableImpl & getImpl(NdbDictionary::Table & t); static NdbTableImpl & getImpl(const NdbDictionary::Table & t); @@ -258,9 +259,9 @@ public: ~NdbIndexImpl(); void init(); - void setName(const char * name); + int setName(const char * name); const char * getName() const; - void setTable(const char * table); + int setTable(const char * table); const char * getTable() const; const NdbTableImpl * getIndexTable() const; @@ -296,11 +297,11 @@ public: ~NdbEventImpl(); void init(); - void setName(const char * name); + int setName(const char * name); const char * getName() const; - void setTable(const NdbDictionary::Table& table); + int setTable(const NdbDictionary::Table& table); const NdbDictionary::Table * getTable() const; - void setTable(const char * table); + int setTable(const char * table); const char * getTableName() const; void addTableEvent(const 
NdbDictionary::Event::TableEvent t); bool getTableEvent(const NdbDictionary::Event::TableEvent t) const; @@ -364,7 +365,7 @@ public: NdbTablespaceImpl(NdbDictionary::Tablespace &); ~NdbTablespaceImpl(); - void assign(const NdbTablespaceImpl&); + int assign(const NdbTablespaceImpl&); static NdbTablespaceImpl & getImpl(NdbDictionary::Tablespace & t); static const NdbTablespaceImpl & getImpl(const NdbDictionary::Tablespace &); @@ -378,7 +379,7 @@ public: NdbLogfileGroupImpl(NdbDictionary::LogfileGroup &); ~NdbLogfileGroupImpl(); - void assign(const NdbLogfileGroupImpl&); + int assign(const NdbLogfileGroupImpl&); static NdbLogfileGroupImpl & getImpl(NdbDictionary::LogfileGroup & t); static const NdbLogfileGroupImpl& getImpl(const @@ -403,7 +404,7 @@ public: NdbDatafileImpl(NdbDictionary::Datafile &); ~NdbDatafileImpl(); - void assign(const NdbDatafileImpl&); + int assign(const NdbDatafileImpl&); static NdbDatafileImpl & getImpl(NdbDictionary::Datafile & t); static const NdbDatafileImpl & getImpl(const NdbDictionary::Datafile & t); @@ -416,7 +417,7 @@ public: NdbUndofileImpl(NdbDictionary::Undofile &); ~NdbUndofileImpl(); - void assign(const NdbUndofileImpl&); + int assign(const NdbUndofileImpl&); static NdbUndofileImpl & getImpl(NdbDictionary::Undofile & t); static const NdbUndofileImpl & getImpl(const NdbDictionary::Undofile & t); @@ -994,8 +995,9 @@ public: if(NdbDictInterface::create_index_obj_from_table(&idx, &tab, &m_prim) == 0) { idx->m_table = &tab; - idx->m_externalName.assign(m_index_name); - idx->m_internalName.assign(m_name); + if (!idx->m_externalName.assign(m_index_name) || + !idx->m_internalName.assign(m_name)) + DBUG_RETURN(4000); tab.m_index = idx; DBUG_RETURN(0); } diff --git a/storage/ndb/src/ndbapi/NdbEventOperation.cpp b/storage/ndb/src/ndbapi/NdbEventOperation.cpp index 4d8e6a6aea5..5c6ed562dcc 100644 --- a/storage/ndb/src/ndbapi/NdbEventOperation.cpp +++ b/storage/ndb/src/ndbapi/NdbEventOperation.cpp @@ -122,6 +122,12 @@ NdbEventOperation::getGCI() const return m_impl.getGCI(); } +Uint32 +NdbEventOperation::getAnyValue() const +{ + return m_impl.getAnyValue(); +} + Uint64 NdbEventOperation::getLatestGCI() const { diff --git a/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp index 45738a3a65f..a82983fca8c 100644 --- a/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp +++ b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp @@ -41,6 +41,7 @@ #include <NdbEventOperation.hpp> #include "NdbEventOperationImpl.hpp" #include <signaldata/AlterTable.hpp> +#include "ndb_internal.hpp" #include <EventLogger.hpp> extern EventLogger g_eventLogger; @@ -49,7 +50,7 @@ static Gci_container_pod g_empty_gci_container; static const Uint32 ACTIVE_GCI_DIRECTORY_SIZE = 4; static const Uint32 ACTIVE_GCI_MASK = ACTIVE_GCI_DIRECTORY_SIZE - 1; -#ifdef VM_TRACE +#if defined(VM_TRACE) && defined(NOT_USED) static void print_std(const SubTableData * sdata, LinearSectionPtr ptr[3]) { @@ -683,6 +684,12 @@ NdbEventOperationImpl::getGCI() return m_data_item->sdata->gci; } +Uint32 +NdbEventOperationImpl::getAnyValue() const +{ + return m_data_item->sdata->anyValue; +} + Uint64 NdbEventOperationImpl::getLatestGCI() { @@ -731,8 +738,6 @@ NdbEventOperationImpl::receive_event() { // Parse the new table definition and // create a table object - NdbDictionary::Dictionary *myDict = m_ndb->getDictionary(); - NdbDictionaryImpl *dict = & NdbDictionaryImpl::getImpl(*myDict); NdbError error; NdbDictInterface dif(error); NdbTableImpl *at; @@ -1340,6 +1345,7 @@ 
operator<<(NdbOut& out, const Gci_container& gci) return out; } +#ifdef VM_TRACE static NdbOut& operator<<(NdbOut& out, const Gci_container_pod& gci) @@ -1348,7 +1354,7 @@ operator<<(NdbOut& out, const Gci_container_pod& gci) out << *ptr; return out; } - +#endif static Gci_container* @@ -1600,7 +1606,7 @@ NdbEventBuffer::complete_outof_order_gcis() ndbout_c(" moved %ld rows -> %ld", (long) bucket->m_data.m_count, (long) m_complete_data.m_data.m_count); #else - ndbout_c(""); + ndbout_c(" "); #endif } bzero(bucket, sizeof(Gci_container)); @@ -1623,17 +1629,24 @@ NdbEventBuffer::insert_event(NdbEventOperationImpl* impl, Uint32 &oid_ref) { NdbEventOperationImpl *dropped_ev_op = m_dropped_ev_op; + DBUG_PRINT("info", ("gci: %u", data.gci)); do { do { - oid_ref = impl->m_oid; - insertDataL(impl, &data, ptr); + if (impl->m_node_bit_mask.get(0u)) + { + oid_ref = impl->m_oid; + insertDataL(impl, &data, ptr); + } NdbEventOperationImpl* blob_op = impl->theBlobOpList; while (blob_op != NULL) { - oid_ref = blob_op->m_oid; - insertDataL(blob_op, &data, ptr); + if (blob_op->m_node_bit_mask.get(0u)) + { + oid_ref = blob_op->m_oid; + insertDataL(blob_op, &data, ptr); + } blob_op = blob_op->m_next; } } while((impl = impl->m_next)); @@ -1818,6 +1831,7 @@ NdbEventBuffer::insertDataL(NdbEventOperationImpl *op, switch (operation) { case NdbDictionary::Event::_TE_NODE_FAILURE: + DBUG_ASSERT(op->m_node_bit_mask.get(0u) != 0); op->m_node_bit_mask.clear(SubTableData::getNdbdNodeId(ri)); DBUG_PRINT("info", ("_TE_NODE_FAILURE: m_ref_count: %u for op: %p id: %u", @@ -1833,29 +1847,23 @@ NdbEventBuffer::insertDataL(NdbEventOperationImpl *op, DBUG_RETURN_EVENT(0); break; case NdbDictionary::Event::_TE_CLUSTER_FAILURE: - if (op->m_node_bit_mask.get(0)) - { - op->m_node_bit_mask.clear(); - DBUG_ASSERT(op->m_ref_count > 0); - // remove kernel reference - // added in execute_nolock - op->m_ref_count--; - DBUG_PRINT("info", ("_TE_CLUSTER_FAILURE: m_ref_count: %u for op: %p", - op->m_ref_count, op)); - if (op->theMainOp) - { - DBUG_ASSERT(op->m_ref_count == 0); - DBUG_ASSERT(op->theMainOp->m_ref_count > 0); - // remove blob reference in main op - // added in execute_no_lock - op->theMainOp->m_ref_count--; - DBUG_PRINT("info", ("m_ref_count: %u for op: %p", - op->theMainOp->m_ref_count, op->theMainOp)); - } - } - else + DBUG_ASSERT(op->m_node_bit_mask.get(0u) != 0); + op->m_node_bit_mask.clear(); + DBUG_ASSERT(op->m_ref_count > 0); + // remove kernel reference + // added in execute_nolock + op->m_ref_count--; + DBUG_PRINT("info", ("_TE_CLUSTER_FAILURE: m_ref_count: %u for op: %p", + op->m_ref_count, op)); + if (op->theMainOp) { - DBUG_ASSERT(op->m_node_bit_mask.isclear() != 0); + DBUG_ASSERT(op->m_ref_count == 0); + DBUG_ASSERT(op->theMainOp->m_ref_count > 0); + // remove blob reference in main op + // added in execute_no_lock + op->theMainOp->m_ref_count--; + DBUG_PRINT("info", ("m_ref_count: %u for op: %p", + op->theMainOp->m_ref_count, op->theMainOp)); } break; case NdbDictionary::Event::_TE_STOP: @@ -2831,7 +2839,7 @@ send_report: data[5]= apply_gci >> 32; data[6]= latest_gci & ~(Uint32)0; data[7]= latest_gci >> 32; - m_ndb->theImpl->send_event_report(data,8); + Ndb_internal::send_event_report(m_ndb, data,8); #ifdef VM_TRACE assert(m_total_alloc >= m_free_data_sz); #endif diff --git a/storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp b/storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp index 6d58688fa88..3d71146588d 100644 --- a/storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp +++ 
b/storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp @@ -366,6 +366,7 @@ public: const bool tableFragmentationChanged() const; const bool tableRangeListChanged() const; Uint64 getGCI(); + Uint32 getAnyValue() const; Uint64 getLatestGCI(); bool execSUB_TABLE_DATA(NdbApiSignal * signal, LinearSectionPtr ptr[3]); diff --git a/storage/ndb/src/ndbapi/NdbImpl.hpp b/storage/ndb/src/ndbapi/NdbImpl.hpp index 083871061b4..39787b1d4be 100644 --- a/storage/ndb/src/ndbapi/NdbImpl.hpp +++ b/storage/ndb/src/ndbapi/NdbImpl.hpp @@ -37,7 +37,7 @@ struct Ndb_free_list_t Ndb_free_list_t(); ~Ndb_free_list_t(); - void fill(Ndb*, Uint32 cnt); + int fill(Ndb*, Uint32 cnt); T* seize(Ndb*); void release(T*); void clear(); @@ -86,10 +86,23 @@ public: BaseString m_prefix; // Buffer for preformatted internal name <db>/<schema>/ - void update_prefix() + int update_prefix() { - m_prefix.assfmt("%s%c%s%c", m_dbname.c_str(), table_name_separator, - m_schemaname.c_str(), table_name_separator); + if (!m_prefix.assfmt("%s%c%s%c", m_dbname.c_str(), table_name_separator, + m_schemaname.c_str(), table_name_separator)) + { + return -1; + } + return 0; + } + +/* + We need this friend accessor function to work around a HP compiler problem, + where template class friends are not working. +*/ + static inline void setNdbError(Ndb &ndb,int code){ + ndb.theError.code = code; + return; } BaseString m_systemPrefix; // Buffer for preformatted for <sys>/<def>/ @@ -203,7 +216,7 @@ Ndb_free_list_t<T>::~Ndb_free_list_t() template<class T> inline -void +int Ndb_free_list_t<T>::fill(Ndb* ndb, Uint32 cnt) { if (m_free_list == 0) @@ -211,18 +224,28 @@ Ndb_free_list_t<T>::fill(Ndb* ndb, Uint32 cnt) m_free_cnt++; m_alloc_cnt++; m_free_list = new T(ndb); + if (m_free_list == 0) + { + NdbImpl::setNdbError(*ndb, 4000); + assert(false); + return -1; + } } while(m_alloc_cnt < cnt) { T* obj= new T(ndb); if(obj == 0) - return; - + { + NdbImpl::setNdbError(*ndb, 4000); + assert(false); + return -1; + } obj->next(m_free_list); m_free_cnt++; m_alloc_cnt++; m_free_list = obj; } + return 0; } template<class T> @@ -243,7 +266,11 @@ Ndb_free_list_t<T>::seize(Ndb* ndb) { m_alloc_cnt++; } - + else + { + NdbImpl::setNdbError(*ndb, 4000); + assert(false); + } return tmp; } diff --git a/storage/ndb/src/ndbapi/NdbIndexOperation.cpp b/storage/ndb/src/ndbapi/NdbIndexOperation.cpp index 0965338b325..921769f09e3 100644 --- a/storage/ndb/src/ndbapi/NdbIndexOperation.cpp +++ b/storage/ndb/src/ndbapi/NdbIndexOperation.cpp @@ -85,6 +85,9 @@ int NdbIndexOperation::readTuple(NdbOperation::LockMode lm) case LM_CommittedRead: return readTuple(); break; + case LM_SimpleRead: + return readTuple(); + break; default: return -1; }; @@ -172,239 +175,6 @@ NdbIndexOperation::getIndex() const return m_theIndex; } -int -NdbIndexOperation::prepareSend(Uint32 aTC_ConnectPtr, Uint64 aTransactionId) -{ - Uint32 tTransId1, tTransId2; - Uint32 tReqInfo; - Uint32 tSignalCount = 0; - Uint32 tInterpretInd = theInterpretIndicator; - - theErrorLine = 0; - - if (tInterpretInd != 1) { - OperationType tOpType = theOperationType; - OperationStatus tStatus = theStatus; - if ((tOpType == UpdateRequest) || - (tOpType == InsertRequest) || - (tOpType == WriteRequest)) { - if (tStatus != SetValue) { - setErrorCodeAbort(4506); - return -1; - }//if - } else if ((tOpType == ReadRequest) || (tOpType == ReadExclusive) || - (tOpType == DeleteRequest)) { - if (tStatus != GetValue) { - setErrorCodeAbort(4506); - return -1; - }//if - } else { - setErrorCodeAbort(4507); - return -1; - }//if - } else { - if 
(prepareSendInterpreted() == -1) { - return -1; - }//if - }//if - -//------------------------------------------------------------- -// We start by filling in the first 8 unconditional words of the -// TCINDXREQ signal. -//------------------------------------------------------------- - TcKeyReq * tcKeyReq = - CAST_PTR(TcKeyReq, theTCREQ->getDataPtrSend()); - - Uint32 tTotalCurrAI_Len = theTotalCurrAI_Len; - Uint32 tIndexId = m_theIndex->m_id; - Uint32 tSchemaVersion = m_theIndex->m_version; - - tcKeyReq->apiConnectPtr = aTC_ConnectPtr; - tcKeyReq->senderData = ptr2int(); - tcKeyReq->attrLen = tTotalCurrAI_Len; - tcKeyReq->tableId = tIndexId; - tcKeyReq->tableSchemaVersion = tSchemaVersion; - - tTransId1 = (Uint32) aTransactionId; - tTransId2 = (Uint32) (aTransactionId >> 32); - -//------------------------------------------------------------- -// Simple is simple if simple or both start and commit is set. -//------------------------------------------------------------- -// Temporarily disable simple stuff - Uint8 tSimpleIndicator = 0; -// Uint8 tSimpleIndicator = theSimpleIndicator; - Uint8 tCommitIndicator = theCommitIndicator; - Uint8 tStartIndicator = theStartIndicator; -// if ((theNdbCon->theLastOpInList == this) && (theCommitIndicator == 0)) -// abort(); -// Temporarily disable simple stuff - Uint8 tSimpleAlt = 0; -// Uint8 tSimpleAlt = tStartIndicator & tCommitIndicator; - tSimpleIndicator = tSimpleIndicator | tSimpleAlt; - -//------------------------------------------------------------- -// Simple state is set if start and commit is set and it is -// a read request. Otherwise it is set to zero. -//------------------------------------------------------------- - Uint8 tReadInd = (theOperationType == ReadRequest); - Uint8 tSimpleState = tReadInd & tSimpleAlt; - //theNdbCon->theSimpleState = tSimpleState; - - tcKeyReq->transId1 = tTransId1; - tcKeyReq->transId2 = tTransId2; - - tReqInfo = 0; - - if (tTotalCurrAI_Len <= TcKeyReq::MaxAttrInfo) { - tcKeyReq->setAIInTcKeyReq(tReqInfo, tTotalCurrAI_Len); - } else { - tcKeyReq->setAIInTcKeyReq(tReqInfo, TcKeyReq::MaxAttrInfo); - }//if - - tcKeyReq->setSimpleFlag(tReqInfo, tSimpleIndicator); - tcKeyReq->setCommitFlag(tReqInfo, tCommitIndicator); - tcKeyReq->setStartFlag(tReqInfo, tStartIndicator); - const Uint8 tInterpretIndicator = theInterpretIndicator; - tcKeyReq->setInterpretedFlag(tReqInfo, tInterpretIndicator); - - Uint8 tDirtyIndicator = theDirtyIndicator; - OperationType tOperationType = theOperationType; - Uint32 tIndexLen = theTupKeyLen; - Uint8 abortOption = theNdbCon->m_abortOption; - - tcKeyReq->setDirtyFlag(tReqInfo, tDirtyIndicator); - tcKeyReq->setOperationType(tReqInfo, tOperationType); - tcKeyReq->setKeyLength(tReqInfo, tIndexLen); - tcKeyReq->setAbortOption(tReqInfo, abortOption); - - Uint8 tDistrKeyIndicator = theDistrKeyIndicator_; - Uint8 tScanIndicator = theScanInfo & 1; - - tcKeyReq->setDistributionKeyFlag(tReqInfo, tDistrKeyIndicator); - tcKeyReq->setScanIndFlag(tReqInfo, tScanIndicator); - - tcKeyReq->requestInfo = tReqInfo; - -//------------------------------------------------------------- -// The next step is to fill in the upto three conditional words. 
-//------------------------------------------------------------- - Uint32* tOptionalDataPtr = &tcKeyReq->scanInfo; - Uint32 tDistrGHIndex = tScanIndicator; - Uint32 tDistrKeyIndex = tDistrGHIndex; - - Uint32 tScanInfo = theScanInfo; - Uint32 tDistrKey = theDistributionKey; - - tOptionalDataPtr[0] = tScanInfo; - tOptionalDataPtr[tDistrKeyIndex] = tDistrKey; - -//------------------------------------------------------------- -// The next is step is to compress the key data part of the -// TCKEYREQ signal. -//------------------------------------------------------------- - Uint32 tKeyIndex = tDistrKeyIndex + tDistrKeyIndicator; - Uint32* tKeyDataPtr = &tOptionalDataPtr[tKeyIndex]; - Uint32 Tdata1 = tcKeyReq->keyInfo[0]; - Uint32 Tdata2 = tcKeyReq->keyInfo[1]; - Uint32 Tdata3 = tcKeyReq->keyInfo[2]; - Uint32 Tdata4 = tcKeyReq->keyInfo[3]; - Uint32 Tdata5; - - tKeyDataPtr[0] = Tdata1; - tKeyDataPtr[1] = Tdata2; - tKeyDataPtr[2] = Tdata3; - tKeyDataPtr[3] = Tdata4; - if (tIndexLen > 4) { - Tdata1 = tcKeyReq->keyInfo[4]; - Tdata2 = tcKeyReq->keyInfo[5]; - Tdata3 = tcKeyReq->keyInfo[6]; - Tdata4 = tcKeyReq->keyInfo[7]; - - tKeyDataPtr[4] = Tdata1; - tKeyDataPtr[5] = Tdata2; - tKeyDataPtr[6] = Tdata3; - tKeyDataPtr[7] = Tdata4; - }//if -//------------------------------------------------------------- -// Finally we also compress the INDXATTRINFO part of the signal. -// We optimise by using the if-statement for sending INDXKEYINFO -// signals to calculating the new Attrinfo Index. -//------------------------------------------------------------- - Uint32 tAttrInfoIndex; - - if (tIndexLen > TcKeyReq::MaxKeyInfo) { - /** - * Set transid and TC connect ptr in the INDXKEYINFO signals - */ - NdbApiSignal* tSignal = theTCREQ->next(); - Uint32 remainingKey = tIndexLen - TcKeyReq::MaxKeyInfo; - - do { - Uint32* tSigDataPtr = tSignal->getDataPtrSend(); - NdbApiSignal* tnextSignal = tSignal->next(); - tSignalCount++; - tSigDataPtr[0] = aTC_ConnectPtr; - tSigDataPtr[1] = tTransId1; - tSigDataPtr[2] = tTransId2; - if (remainingKey > IndxKeyInfo::DataLength) { - // The signal is full - tSignal->setLength(IndxKeyInfo::MaxSignalLength); - remainingKey -= IndxKeyInfo::DataLength; - } - else { - // Last signal - tSignal->setLength(IndxKeyInfo::HeaderLength + remainingKey); - remainingKey = 0; - } - tSignal = tnextSignal; - } while (tSignal != NULL); - tAttrInfoIndex = tKeyIndex + TcKeyReq::MaxKeyInfo; - } else { - tAttrInfoIndex = tKeyIndex + tIndexLen; - }//if - -//------------------------------------------------------------- -// Perform the Attrinfo packing in the TCKEYREQ signal started -// above. -//------------------------------------------------------------- - Uint32* tAIDataPtr = &tOptionalDataPtr[tAttrInfoIndex]; - Tdata1 = tcKeyReq->attrInfo[0]; - Tdata2 = tcKeyReq->attrInfo[1]; - Tdata3 = tcKeyReq->attrInfo[2]; - Tdata4 = tcKeyReq->attrInfo[3]; - Tdata5 = tcKeyReq->attrInfo[4]; - - theTCREQ->setLength(tcKeyReq->getAIInTcKeyReq(tReqInfo) + - tAttrInfoIndex + TcKeyReq::StaticLength); - tAIDataPtr[0] = Tdata1; - tAIDataPtr[1] = Tdata2; - tAIDataPtr[2] = Tdata3; - tAIDataPtr[3] = Tdata4; - tAIDataPtr[4] = Tdata5; - -/*************************************************** -* Send the INDXATTRINFO signals. -***************************************************/ - if (tTotalCurrAI_Len > 5) { - // Set the last signal's length. 
- NdbApiSignal* tSignal = theFirstATTRINFO; - theCurrentATTRINFO->setLength(theAI_LenInCurrAI); - do { - Uint32* tSigDataPtr = tSignal->getDataPtrSend(); - NdbApiSignal* tnextSignal = tSignal->next(); - tSignalCount++; - tSigDataPtr[0] = aTC_ConnectPtr; - tSigDataPtr[1] = tTransId1; - tSigDataPtr[2] = tTransId2; - tSignal = tnextSignal; - } while (tSignal != NULL); - }//if - theStatus = WaitResponse; - theReceiver.prepareSend(); - return 0; -} - /*************************************************************************** int receiveTCINDXREF( NdbApiSignal* aSignal) diff --git a/storage/ndb/src/ndbapi/NdbIndexStat.cpp b/storage/ndb/src/ndbapi/NdbIndexStat.cpp index f95dcfb994c..0ce96b1b4d9 100644 --- a/storage/ndb/src/ndbapi/NdbIndexStat.cpp +++ b/storage/ndb/src/ndbapi/NdbIndexStat.cpp @@ -240,7 +240,6 @@ NdbIndexStat::stat_oldest(const Area& a) m = ~(Uint32)0; // shut up incorrect CC warning for (i = 0; i < a.m_entries; i++) { Pointer& p = a.get_pointer(i); - Entry& e = a.get_entry(i); Uint32 m2 = m_seq >= p.m_seq ? m_seq - p.m_seq : p.m_seq - m_seq; if (! found || m < m2) { m = m2; @@ -427,7 +426,7 @@ NdbIndexStat::records_in_range(const NdbDictionary::Index* index, NdbIndexScanOp DBUG_RETURN(-1); } if (trans->execute(NdbTransaction::NoCommit, - NdbTransaction::AbortOnError, forceSend) == -1) { + NdbOperation::AbortOnError, forceSend) == -1) { m_error = trans->getNdbError(); DBUG_PRINT("error", ("trans:%d op:%d", trans->getNdbError().code, op->getNdbError().code)); diff --git a/storage/ndb/src/ndbapi/NdbOperation.cpp b/storage/ndb/src/ndbapi/NdbOperation.cpp index ff9c50da77c..ddaf5d0b233 100644 --- a/storage/ndb/src/ndbapi/NdbOperation.cpp +++ b/storage/ndb/src/ndbapi/NdbOperation.cpp @@ -76,7 +76,8 @@ NdbOperation::NdbOperation(Ndb* aNdb, NdbOperation::Type aType) : m_keyInfoGSN(GSN_KEYINFO), m_attrInfoGSN(GSN_ATTRINFO), theBlobList(NULL), - m_abortOption(-1) + m_abortOption(-1), + m_noErrorPropagation(false) { theReceiver.init(NdbReceiver::NDB_OPERATION, this); theError.code = 0; @@ -101,7 +102,8 @@ NdbOperation::setErrorCode(int anErrorCode) theError.code = anErrorCode; theNdbCon->theErrorLine = theErrorLine; theNdbCon->theErrorOperation = this; - theNdbCon->setOperationErrorCode(anErrorCode); + if (!(m_abortOption == AO_IgnoreError && m_noErrorPropagation)) + theNdbCon->setOperationErrorCode(anErrorCode); } /****************************************************************************** @@ -116,6 +118,7 @@ NdbOperation::setErrorCodeAbort(int anErrorCode) theError.code = anErrorCode; theNdbCon->theErrorLine = theErrorLine; theNdbCon->theErrorOperation = this; + // ignore m_noErrorPropagation theNdbCon->setOperationErrorCodeAbort(anErrorCode); } @@ -161,6 +164,7 @@ NdbOperation::init(const NdbTableImpl* tab, NdbTransaction* myConnection){ theMagicNumber = 0xABCDEF01; theBlobList = NULL; m_abortOption = -1; + m_noErrorPropagation = false; m_no_disk_flag = 1; tSignal = theNdb->getSignal(); @@ -177,7 +181,11 @@ NdbOperation::init(const NdbTableImpl* tab, NdbTransaction* myConnection){ tcKeyReq->scanInfo = 0; theKEYINFOptr = &tcKeyReq->keyInfo[0]; theATTRINFOptr = &tcKeyReq->attrInfo[0]; - theReceiver.init(NdbReceiver::NDB_OPERATION, this); + if (theReceiver.init(NdbReceiver::NDB_OPERATION, this)) + { + // theReceiver sets the error code of its owner + return -1; + } return 0; } @@ -369,12 +377,24 @@ NdbOperation::subValue( const char* anAttrName, Uint32 aValue) } int +NdbOperation::subValue( const char* anAttrName, Uint64 aValue) +{ + return subValue(m_currentTable->getColumn(anAttrName), 
aValue); +} + +int NdbOperation::subValue(Uint32 anAttrId, Uint32 aValue) { return subValue(m_currentTable->getColumn(anAttrId), aValue); } int +NdbOperation::subValue(Uint32 anAttrId, Uint64 aValue) +{ + return subValue(m_currentTable->getColumn(anAttrId), aValue); +} + +int NdbOperation::read_attr(const char* anAttrName, Uint32 RegDest) { return read_attr(m_currentTable->getColumn(anAttrName), RegDest); @@ -409,3 +429,9 @@ NdbOperation::getTable() const { return m_currentTable; } + +NdbTransaction* +NdbOperation::getNdbTransaction() +{ + return theNdbCon; +} diff --git a/storage/ndb/src/ndbapi/NdbOperationDefine.cpp b/storage/ndb/src/ndbapi/NdbOperationDefine.cpp index ba26831749d..73778ab9bbe 100644 --- a/storage/ndb/src/ndbapi/NdbOperationDefine.cpp +++ b/storage/ndb/src/ndbapi/NdbOperationDefine.cpp @@ -44,6 +44,7 @@ NdbOperation::insertTuple() tNdbCon->theSimpleState = 0; theErrorLine = tErrorLine++; theLockMode = LM_Exclusive; + m_abortOption = AbortOnError; return 0; } else { setErrorCode(4200); @@ -64,6 +65,7 @@ NdbOperation::updateTuple() theOperationType = UpdateRequest; theErrorLine = tErrorLine++; theLockMode = LM_Exclusive; + m_abortOption = AbortOnError; return 0; } else { setErrorCode(4200); @@ -84,12 +86,35 @@ NdbOperation::writeTuple() theOperationType = WriteRequest; theErrorLine = tErrorLine++; theLockMode = LM_Exclusive; + m_abortOption = AbortOnError; return 0; } else { setErrorCode(4200); return -1; }//if }//NdbOperation::writeTuple() +/***************************************************************************** + * int deleteTuple(); + *****************************************************************************/ +int +NdbOperation::deleteTuple() +{ + NdbTransaction* tNdbCon = theNdbCon; + int tErrorLine = theErrorLine; + if (theStatus == Init) { + theStatus = OperationDefined; + tNdbCon->theSimpleState = 0; + theOperationType = DeleteRequest; + theErrorLine = tErrorLine++; + theLockMode = LM_Exclusive; + m_abortOption = AbortOnError; + return 0; + } else { + setErrorCode(4200); + return -1; + }//if +}//NdbOperation::deleteTuple() + /****************************************************************************** * int readTuple(); *****************************************************************************/ @@ -106,6 +131,8 @@ NdbOperation::readTuple(NdbOperation::LockMode lm) case LM_CommittedRead: return committedRead(); break; + case LM_SimpleRead: + return simpleRead(); default: return -1; }; @@ -124,6 +151,7 @@ NdbOperation::readTuple() theOperationType = ReadRequest; theErrorLine = tErrorLine++; theLockMode = LM_Read; + m_abortOption = AO_IgnoreError; return 0; } else { setErrorCode(4200); @@ -131,27 +159,6 @@ NdbOperation::readTuple() }//if }//NdbOperation::readTuple() -/***************************************************************************** - * int deleteTuple(); - *****************************************************************************/ -int -NdbOperation::deleteTuple() -{ - NdbTransaction* tNdbCon = theNdbCon; - int tErrorLine = theErrorLine; - if (theStatus == Init) { - theStatus = OperationDefined; - tNdbCon->theSimpleState = 0; - theOperationType = DeleteRequest; - theErrorLine = tErrorLine++; - theLockMode = LM_Exclusive; - return 0; - } else { - setErrorCode(4200); - return -1; - }//if -}//NdbOperation::deleteTuple() - /****************************************************************************** * int readTupleExclusive(); *****************************************************************************/ @@ -166,6 +173,7 @@ 
NdbOperation::readTupleExclusive() theOperationType = ReadExclusive; theErrorLine = tErrorLine++; theLockMode = LM_Exclusive; + m_abortOption = AO_IgnoreError; return 0; } else { setErrorCode(4200); @@ -179,24 +187,22 @@ NdbOperation::readTupleExclusive() int NdbOperation::simpleRead() { - /** - * Currently/still disabled - */ - return readTuple(); -#if 0 + NdbTransaction* tNdbCon = theNdbCon; int tErrorLine = theErrorLine; if (theStatus == Init) { theStatus = OperationDefined; theOperationType = ReadRequest; theSimpleIndicator = 1; + theDirtyIndicator = 0; theErrorLine = tErrorLine++; - theLockMode = LM_Read; + theLockMode = LM_SimpleRead; + m_abortOption = AO_IgnoreError; + tNdbCon->theSimpleState = 0; return 0; } else { setErrorCode(4200); return -1; }//if -#endif }//NdbOperation::simpleRead() /***************************************************************************** @@ -222,6 +228,7 @@ NdbOperation::committedRead() theDirtyIndicator = 1; theErrorLine = tErrorLine++; theLockMode = LM_CommittedRead; + m_abortOption = AO_IgnoreError; return 0; } else { setErrorCode(4200); @@ -245,6 +252,7 @@ NdbOperation::dirtyUpdate() theDirtyIndicator = 1; theErrorLine = tErrorLine++; theLockMode = LM_CommittedRead; + m_abortOption = AbortOnError; return 0; } else { setErrorCode(4200); @@ -268,6 +276,7 @@ NdbOperation::dirtyWrite() theDirtyIndicator = 1; theErrorLine = tErrorLine++; theLockMode = LM_CommittedRead; + m_abortOption = AbortOnError; return 0; } else { setErrorCode(4200); @@ -290,6 +299,7 @@ NdbOperation::interpretedUpdateTuple() theAI_LenInCurrAI = 25; theLockMode = LM_Exclusive; theErrorLine = tErrorLine++; + m_abortOption = AbortOnError; initInterpreter(); return 0; } else { @@ -314,6 +324,7 @@ NdbOperation::interpretedDeleteTuple() theErrorLine = tErrorLine++; theAI_LenInCurrAI = 25; theLockMode = LM_Exclusive; + m_abortOption = AbortOnError; initInterpreter(); return 0; } else { @@ -322,6 +333,40 @@ NdbOperation::interpretedDeleteTuple() }//if }//NdbOperation::interpretedDeleteTuple() +void +NdbOperation::setReadLockMode(LockMode lockMode) +{ + /* We only support changing lock mode for read operations at this time. */ + assert(theOperationType == ReadRequest || theOperationType == ReadExclusive); + switch (lockMode) { + case LM_CommittedRead: /* TODO, check theNdbCon->theSimpleState */ + theOperationType= ReadRequest; + theSimpleIndicator= 1; + theDirtyIndicator= 1; + break; + case LM_SimpleRead: /* TODO, check theNdbCon->theSimpleState */ + theOperationType= ReadRequest; + theSimpleIndicator= 1; + theDirtyIndicator= 0; + break; + case LM_Read: + theNdbCon->theSimpleState= 0; + theOperationType= ReadRequest; + theSimpleIndicator= 0; + theDirtyIndicator= 0; + break; + case LM_Exclusive: + theNdbCon->theSimpleState= 0; + theOperationType= ReadExclusive; + theSimpleIndicator= 0; + theDirtyIndicator= 0; + break; + default: + /* Not supported / invalid. 
*/ + assert(false); + } + theLockMode= lockMode; +} /****************************************************************************** @@ -530,11 +575,9 @@ NdbOperation::setValue( const NdbColumnImpl* tAttrInfo, }//if }//if - // Including bits in last word - const Uint32 totalSizeInWords = (sizeInBytes + 3)/4; // Excluding bits in last word const Uint32 sizeInWords = sizeInBytes / 4; - AttributeHeader& ah = AttributeHeader::init(&ahValue, tAttrId, sizeInBytes); + (void) AttributeHeader::init(&ahValue, tAttrId, sizeInBytes); insertATTRINFO( ahValue ); /*********************************************************************** @@ -560,12 +603,34 @@ NdbOperation::setValue( const NdbColumnImpl* tAttrInfo, }//if theErrorLine++; DBUG_RETURN(0); - -error: - setErrorCodeAbort(tReturnCode); - DBUG_RETURN(-1); }//NdbOperation::setValue() + +int +NdbOperation::setAnyValue(Uint32 any_value) +{ + const NdbColumnImpl* impl = + &NdbColumnImpl::getImpl(* NdbDictionary::Column::ANY_VALUE); + OperationType tOpType = theOperationType; + + switch(tOpType){ + case DeleteRequest:{ + Uint32 ah; + AttributeHeader::init(&ah, AttributeHeader::ANY_VALUE, 4); + if (insertATTRINFO(ah) != -1 && insertATTRINFO(any_value) != -1 ) + { + return 0; + } + } + default: + return setValue(impl, (const char *)&any_value); + } + + setErrorCodeAbort(4000); + return -1; +} + + NdbBlob* NdbOperation::getBlobHandle(NdbTransaction* aCon, const NdbColumnImpl* tAttrInfo) { @@ -710,6 +775,22 @@ insertATTRINFO_error1: }//NdbOperation::insertATTRINFOloop() +NdbOperation::AbortOption +NdbOperation::getAbortOption() const +{ + return (AbortOption)m_abortOption; +} - - +int +NdbOperation::setAbortOption(AbortOption ao) +{ + switch(ao) + { + case AO_IgnoreError: + case AbortOnError: + m_abortOption= ao; + return 0; + default: + return -1; + } +} diff --git a/storage/ndb/src/ndbapi/NdbOperationExec.cpp b/storage/ndb/src/ndbapi/NdbOperationExec.cpp index 80f388605b0..27672e0458c 100644 --- a/storage/ndb/src/ndbapi/NdbOperationExec.cpp +++ b/storage/ndb/src/ndbapi/NdbOperationExec.cpp @@ -99,7 +99,9 @@ Parameters: aTC_ConnectPtr: the Connect pointer to TC. Remark: Puts the the data into TCKEYREQ signal and optional KEYINFO and ATTRINFO signals. ***************************************************************************/ int -NdbOperation::prepareSend(Uint32 aTC_ConnectPtr, Uint64 aTransId) +NdbOperation::prepareSend(Uint32 aTC_ConnectPtr, + Uint64 aTransId, + AbortOption ao) { Uint32 tTransId1, tTransId2; Uint32 tReqInfo; @@ -147,8 +149,8 @@ NdbOperation::prepareSend(Uint32 aTC_ConnectPtr, Uint64 aTransId) //------------------------------------------------------------- TcKeyReq * const tcKeyReq = CAST_PTR(TcKeyReq, theTCREQ->getDataPtrSend()); - Uint32 tTableId = m_currentTable->m_id; - Uint32 tSchemaVersion = m_currentTable->m_version; + Uint32 tTableId = m_accessTable->m_id; + Uint32 tSchemaVersion = m_accessTable->m_version; tcKeyReq->apiConnectPtr = aTC_ConnectPtr; tcKeyReq->apiOperationPtr = ptr2int(); @@ -173,12 +175,11 @@ NdbOperation::prepareSend(Uint32 aTC_ConnectPtr, Uint64 aTransId) Uint8 tInterpretIndicator = theInterpretIndicator; Uint8 tNoDisk = m_no_disk_flag; -//------------------------------------------------------------- -// Simple state is set if start and commit is set and it is -// a read request. Otherwise it is set to zero. 
-//------------------------------------------------------------- + /** + * A dirty read, can not abort the transaction + */ Uint8 tReadInd = (theOperationType == ReadRequest); - Uint8 tSimpleState = tReadInd & tSimpleIndicator; + Uint8 tDirtyState = tReadInd & tDirtyIndicator; tcKeyReq->transId1 = tTransId1; tcKeyReq->transId2 = tTransId2; @@ -198,16 +199,16 @@ NdbOperation::prepareSend(Uint32 aTC_ConnectPtr, Uint64 aTransId) OperationType tOperationType = theOperationType; Uint32 tTupKeyLen = theTupKeyLen; - Uint8 abortOption = - m_abortOption != -1 ? m_abortOption : theNdbCon->m_abortOption; + Uint8 abortOption = (ao == DefaultAbortOption) ? (Uint8) m_abortOption : (Uint8) ao; tcKeyReq->setDirtyFlag(tReqInfo, tDirtyIndicator); tcKeyReq->setOperationType(tReqInfo, tOperationType); tcKeyReq->setKeyLength(tReqInfo, tTupKeyLen); - // A simple read is always ignore error - abortOption = tSimpleIndicator ? (Uint8) AO_IgnoreError : abortOption; + // A dirty read is always ignore error + abortOption = tDirtyState ? (Uint8) AO_IgnoreError : (Uint8) abortOption; tcKeyReq->setAbortOption(tReqInfo, abortOption); + m_abortOption = abortOption; Uint8 tDistrKeyIndicator = theDistrKeyIndicator_; Uint8 tScanIndicator = theScanInfo & 1; @@ -543,21 +544,16 @@ NdbOperation::receiveTCKEYREF( NdbApiSignal* aSignal) return -1; }//if - AbortOption ao = (AbortOption) - (m_abortOption != -1 ? m_abortOption : theNdbCon->m_abortOption); + setErrorCode(aSignal->readData(4)); + theStatus = Finished; theReceiver.m_received_result_length = ~0; - theStatus = Finished; - // blobs want this - if (m_abortOption != AO_IgnoreError) + // not dirty read + if(! (theOperationType == ReadRequest && theDirtyIndicator)) { - theNdbCon->theReturnStatus = NdbTransaction::ReturnFailure; + theNdbCon->OpCompleteFailure(this); + return -1; } - theError.code = aSignal->readData(4); - theNdbCon->setOperationErrorCodeAbort(aSignal->readData(4), ao); - - if(theOperationType != ReadRequest || !theSimpleIndicator) // not simple read - return theNdbCon->OpCompleteFailure(ao, m_abortOption != AO_IgnoreError); /** * If TCKEYCONF has arrived @@ -565,23 +561,8 @@ NdbOperation::receiveTCKEYREF( NdbApiSignal* aSignal) */ if(theReceiver.m_expected_result_length) { - return theNdbCon->OpCompleteFailure(AbortOnError); + return theNdbCon->OpCompleteFailure(this); } return -1; } - - -void -NdbOperation::handleFailedAI_ElemLen() -{ - NdbRecAttr* tRecAttr = theReceiver.theFirstRecAttr; - while (tRecAttr != NULL) { - tRecAttr->setNULL(); - tRecAttr = tRecAttr->next(); - }//while -}//NdbOperation::handleFailedAI_ElemLen() - - - - diff --git a/storage/ndb/src/ndbapi/NdbOperationInt.cpp b/storage/ndb/src/ndbapi/NdbOperationInt.cpp index 0df1dbfe2c8..f69211cb78b 100644 --- a/storage/ndb/src/ndbapi/NdbOperationInt.cpp +++ b/storage/ndb/src/ndbapi/NdbOperationInt.cpp @@ -1023,7 +1023,7 @@ NdbOperation::branch_col(Uint32 type, DBUG_PRINT("enter", ("type: %u col:%u val: 0x%lx len: %u label: %u", type, ColId, (long) val, len, Label)); if (val != NULL) - DBUG_DUMP("value", (char*)val, len); + DBUG_DUMP("value", (uchar*)val, len); if (initial_interpreterCheck() == -1) DBUG_RETURN(-1); diff --git a/storage/ndb/src/ndbapi/NdbOperationSearch.cpp b/storage/ndb/src/ndbapi/NdbOperationSearch.cpp index dd995989799..605c66d9859 100644 --- a/storage/ndb/src/ndbapi/NdbOperationSearch.cpp +++ b/storage/ndb/src/ndbapi/NdbOperationSearch.cpp @@ -60,7 +60,6 @@ NdbOperation::equal_impl(const NdbColumnImpl* tAttrInfo, tAttrInfo->m_name.c_str(), theOperationType, (long) aValuePassed)); 
- Uint32 tData; const char* aValue = aValuePassed; Uint64 tempData[512]; @@ -310,10 +309,6 @@ NdbOperation::equal_impl(const NdbColumnImpl* tAttrInfo, equal_error2: setErrorCodeAbort(4206); DBUG_RETURN(-1); - - equal_error3: - setErrorCodeAbort(4209); - DBUG_RETURN(-1); } /****************************************************************************** @@ -343,7 +338,6 @@ NdbOperation::insertKEYINFO(const char* aValue, Uint32 tEndPos; Uint32 tPos; Uint32 signalCounter; - Uint32 tData; /***************************************************************************** * Calculate the end position of the attribute in the key information. * @@ -543,7 +537,6 @@ NdbOperation::handle_distribution_key(const Uint64* value, Uint32 len) * Copy distribution key to linear memory */ NdbColumnImpl* const * cols = m_accessTable->m_columns.getBase(); - Uint32 len = 0; Uint64 tmp[1000]; Uint32 chunk = 8; diff --git a/storage/ndb/src/ndbapi/NdbRecAttr.cpp b/storage/ndb/src/ndbapi/NdbRecAttr.cpp index 92cb357ad5c..38ca14085f0 100644 --- a/storage/ndb/src/ndbapi/NdbRecAttr.cpp +++ b/storage/ndb/src/ndbapi/NdbRecAttr.cpp @@ -81,6 +81,7 @@ NdbRecAttr::setup(const NdbColumnImpl* anAttrInfo, char* aValue) theRef = tRef; return 0; } + errno = ENOMEM; return -1; } @@ -100,7 +101,11 @@ NdbRecAttr::copyout() NdbRecAttr * NdbRecAttr::clone() const { NdbRecAttr * ret = new NdbRecAttr(0); - + if (ret == NULL) + { + errno = ENOMEM; + return NULL; + } ret->theAttrId = theAttrId; ret->m_size_in_bytes = m_size_in_bytes; ret->m_column = m_column; @@ -112,6 +117,12 @@ NdbRecAttr::clone() const { ret->theValue = 0; } else { ret->theStorageX = new Uint64[((n + 7) >> 3)]; + if (ret->theStorageX == NULL) + { + delete ret; + errno = ENOMEM; + return NULL; + } ret->theRef = (char*)ret->theStorageX; ret->theValue = 0; } @@ -120,8 +131,8 @@ NdbRecAttr::clone() const { } bool -NdbRecAttr::receive_data(const Uint32 * data, Uint32 sz){ - const Uint32 n = m_size_in_bytes; +NdbRecAttr::receive_data(const Uint32 * data, Uint32 sz) +{ if(sz) { if(!copyoutRequired()) @@ -138,8 +149,24 @@ NdbRecAttr::receive_data(const Uint32 * data, Uint32 sz){ return false; } +NdbRecordPrintFormat::NdbRecordPrintFormat() +{ + fields_terminated_by= ";"; + start_array_enclosure= "["; + end_array_enclosure= "]"; + fields_enclosed_by= ""; + fields_optionally_enclosed_by= "\""; + lines_terminated_by= "\n"; + hex_prefix= "H'"; + null_string= "[NULL]"; + hex_format= 0; +} +NdbRecordPrintFormat::~NdbRecordPrintFormat() {} +static const NdbRecordPrintFormat default_print_format; + static void -ndbrecattr_print_string(NdbOut& out, const char *type, +ndbrecattr_print_string(NdbOut& out, const NdbRecordPrintFormat &f, + const char *type, bool is_binary, const char *aref, unsigned sz) { const unsigned char* ref = (const unsigned char*)aref; @@ -148,6 +175,25 @@ ndbrecattr_print_string(NdbOut& out, const char *type, for (i=sz-1; i >= 0; i--) if (ref[i] == 0) sz--; else break; + if (!is_binary) + { + // trailing spaces are not printed + for (i=sz-1; i >= 0; i--) + if (ref[i] == 32) sz--; + else break; + } + if (is_binary && f.hex_format) + { + if (sz == 0) + { + out.print("0x0"); + return; + } + out.print("0x"); + for (len = 0; len < (int)sz; len++) + out.print("%02X", (int)ref[len]); + return; + } if (sz == 0) return; // empty for (len=0; len < (int)sz && ref[i] != 0; len++) @@ -168,43 +214,63 @@ ndbrecattr_print_string(NdbOut& out, const char *type, for (i= len+1; ref[i] != 0; i++) out.print("%u]",len-i); assert((int)sz > i); - ndbrecattr_print_string(out,type,aref+i,sz-i); + 
@@ -168,43 +214,63 @@ ndbrecattr_print_string(NdbOut& out, const char *type,
     for (i= len+1; ref[i] != 0; i++)
       out.print("%u]",len-i);
     assert((int)sz > i);
-    ndbrecattr_print_string(out,type,aref+i,sz-i);
+    ndbrecattr_print_string(out,f,type,is_binary,aref+i,sz-i);
   }
 }
 
-NdbOut& operator<<(NdbOut& out, const NdbRecAttr &r)
+NdbOut&
+ndbrecattr_print_formatted(NdbOut& out, const NdbRecAttr &r,
+                           const NdbRecordPrintFormat &f)
 {
   if (r.isNULL())
   {
-    out << "[NULL]";
+    out << f.null_string;
     return out;
   }
 
   const NdbDictionary::Column* c = r.getColumn();
   uint length = c->getLength();
-  if (length > 1)
-    out << "[";
-
-  for (Uint32 j = 0; j < length; j++) {
-    if (j > 0)
-      out << " ";
-
+  const char *fields_optionally_enclosed_by;
+  if (f.fields_enclosed_by[0] == '\0')
+    fields_optionally_enclosed_by=
+      f.fields_optionally_enclosed_by;
+  else
+    fields_optionally_enclosed_by= "";
+  out << f.fields_enclosed_by;
+  Uint32 j;
   switch(r.getType()){
   case NdbDictionary::Column::Bigunsigned:
     out << r.u_64_value();
     break;
   case NdbDictionary::Column::Bit:
-    out << hex << "H'" << r.u_32_value() << dec;
+    out << f.hex_prefix << "0x";
+    {
+      const Uint32 *buf = (Uint32 *)r.aRef();
+      int k = (length+31)/32;
+      while (k > 0 && (buf[--k] == 0));
+      out.print("%X", buf[k]);
+      while (k > 0)
+        out.print("%.8X", buf[--k]);
+    }
     break;
   case NdbDictionary::Column::Unsigned:
-    out << *((Uint32*)r.aRef() + j);
+    if (length > 1)
+      out << f.start_array_enclosure;
+    out << *(Uint32*)r.aRef();
+    for (j = 1; j < length; j++)
+      out << " " << *((Uint32*)r.aRef() + j);
+    if (length > 1)
+      out << f.end_array_enclosure;
+    break;
+  case NdbDictionary::Column::Mediumunsigned:
+    out << r.u_medium_value();
     break;
   case NdbDictionary::Column::Smallunsigned:
     out << r.u_short_value();
     break;
   case NdbDictionary::Column::Tinyunsigned:
-    out << (unsigned) r.u_char_value();
+    out << (unsigned) r.u_8_value();
     break;
   case NdbDictionary::Column::Bigint:
     out << r.int64_value();
@@ -212,32 +278,47 @@ NdbOut& operator<<(NdbOut& out, const NdbRecAttr &r)
   case NdbDictionary::Column::Int:
     out << r.int32_value();
     break;
+  case NdbDictionary::Column::Mediumint:
+    out << r.medium_value();
+    break;
   case NdbDictionary::Column::Smallint:
     out << r.short_value();
     break;
   case NdbDictionary::Column::Tinyint:
-    out << (int) r.char_value();
+    out << (int) r.int8_value();
     break;
   case NdbDictionary::Column::Binary:
+    if (!f.hex_format)
+      out << fields_optionally_enclosed_by;
     j = r.get_size_in_bytes();
-    ndbrecattr_print_string(out,"Binary", r.aRef(), j);
+    ndbrecattr_print_string(out,f,"Binary", true, r.aRef(), j);
+    if (!f.hex_format)
+      out << fields_optionally_enclosed_by;
    break;
   case NdbDictionary::Column::Char:
+    out << fields_optionally_enclosed_by;
     j = r.get_size_in_bytes();
-    ndbrecattr_print_string(out,"Char", r.aRef(), j);
+    ndbrecattr_print_string(out,f,"Char", false, r.aRef(), j);
+    out << fields_optionally_enclosed_by;
     break;
   case NdbDictionary::Column::Varchar:
     {
+      out << fields_optionally_enclosed_by;
      unsigned len = *(const unsigned char*)r.aRef();
-      ndbrecattr_print_string(out,"Varchar", r.aRef()+1,len);
+      ndbrecattr_print_string(out,f,"Varchar", false, r.aRef()+1,len);
       j = length;
+      out << fields_optionally_enclosed_by;
     }
     break;
   case NdbDictionary::Column::Varbinary:
     {
+      if (!f.hex_format)
+        out << fields_optionally_enclosed_by;
       unsigned len = *(const unsigned char*)r.aRef();
-      ndbrecattr_print_string(out,"Varbinary", r.aRef()+1,len);
+      ndbrecattr_print_string(out,f,"Varbinary", true, r.aRef()+1,len);
       j = length;
+      if (!f.hex_format)
+        out << fields_optionally_enclosed_by;
     }
     break;
   case NdbDictionary::Column::Float:
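The new Bit branch above prints a bit column as hex, word by word: it skips high-order all-zero words, prints the most significant remaining word unpadded, and zero-pads every lower word to eight digits. A standalone rendering of the same loop, handy for checking the expected output (names here are ours):

    #include <cstdio>
    typedef unsigned int Uint32;

    static void print_bit_value(const Uint32 *buf, unsigned length_in_bits)
    {
      int k = (length_in_bits + 31) / 32;  // number of 32-bit words
      while (k > 0 && buf[--k] == 0)       // skip leading all-zero words
        ;
      std::printf("0x");
      std::printf("%X", buf[k]);           // top word without padding
      while (k > 0)
        std::printf("%.8X", buf[--k]);     // lower words, 8 hex digits each
    }

    int main()
    {
      const Uint32 v[2] = { 0xDEADBEEF, 0x1 };  // a 33-bit value
      print_bit_value(v, 33);                   // prints 0x1DEADBEEF
      std::printf("\n");
      return 0;
    }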
@@ -330,7 +411,7 @@ NdbOut& operator<<(NdbOut& out, const NdbRecAttr &r)
     break;
   case NdbDictionary::Column::Year:
     {
-      uint year = 1900 + r.u_char_value();
+      uint year = 1900 + r.u_8_value();
       char buf[40];
       sprintf(buf, "%04d", year);
       out << buf;
@@ -366,16 +447,26 @@ NdbOut& operator<<(NdbOut& out, const NdbRecAttr &r)
     break;
   case NdbDictionary::Column::Longvarchar:
     {
+      out << fields_optionally_enclosed_by;
+      unsigned len = uint2korr(r.aRef());
+      ndbrecattr_print_string(out,f,"Longvarchar", false, r.aRef()+2,len);
+      j = length;
+      out << fields_optionally_enclosed_by;
+    }
+    break;
+  case NdbDictionary::Column::Longvarbinary:
+    {
+      if (!f.hex_format)
+        out << fields_optionally_enclosed_by;
       unsigned len = uint2korr(r.aRef());
-      ndbrecattr_print_string(out,"Longvarchar", r.aRef()+2,len);
+      ndbrecattr_print_string(out,f,"Longvarbinary", true, r.aRef()+2,len);
       j = length;
+      if (!f.hex_format)
+        out << fields_optionally_enclosed_by;
     }
     break;
   case NdbDictionary::Column::Undefined:
-  case NdbDictionary::Column::Mediumint:
-  case NdbDictionary::Column::Mediumunsigned:
-  case NdbDictionary::Column::Longvarbinary:
   unknown:
   //default: /* no print functions for the rest, just print type */
     out << (int) r.getType();
@@ -384,16 +475,17 @@ NdbOut& operator<<(NdbOut& out, const NdbRecAttr &r)
       out << " " << j << " times";
     break;
   }
-  }
-
-  if (length > 1)
-  {
-    out << "]";
+  out << f.fields_enclosed_by;
   }
 
   return out;
 }
 
+NdbOut& operator<<(NdbOut& out, const NdbRecAttr &r)
+{
+  return ndbrecattr_print_formatted(out, r, default_print_format);
+}
+
 Int64
 NdbRecAttr::int64_value() const
 {
@@ -425,3 +517,15 @@ NdbRecAttr::double_value() const
   memcpy(&val,theRef,sizeof(val));
   return val;
 }
+
+Int32
+NdbRecAttr::medium_value() const
+{
+  return sint3korr((unsigned char *)theRef);
+}
+
+Uint32
+NdbRecAttr::u_medium_value() const
+{
+  return uint3korr((unsigned char*)theRef);
+}
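The medium_value()/u_medium_value() accessors added at the end of NdbRecAttr.cpp unpack MySQL's 3-byte little-endian MEDIUMINT format via the sint3korr/uint3korr macros; the printing code above relies on them for the new Mediumint/Mediumunsigned cases. The same unpacking spelled out in portable C++ (our names, same byte layout):

    #include <cstdio>

    // 3-byte little-endian unsigned: b0 | b1<<8 | b2<<16 (what uint3korr does)
    static unsigned medium_unsigned(const unsigned char *p)
    {
      return (unsigned)p[0] | ((unsigned)p[1] << 8) | ((unsigned)p[2] << 16);
    }

    // Signed variant: sign-extend from bit 23 (what sint3korr does)
    static int medium_signed(const unsigned char *p)
    {
      unsigned u = medium_unsigned(p);
      return (p[2] & 0x80) ? (int)(u | 0xFF000000u) : (int)u;
    }

    int main()
    {
      const unsigned char bytes[3] = { 0xFF, 0xFF, 0xFF };
      std::printf("%d %u\n", medium_signed(bytes), medium_unsigned(bytes));
      // prints: -1 16777215
      return 0;
    }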
diff --git a/storage/ndb/src/ndbapi/NdbReceiver.cpp b/storage/ndb/src/ndbapi/NdbReceiver.cpp
index 8f91cd10be0..5a311bcbefe 100644
--- a/storage/ndb/src/ndbapi/NdbReceiver.cpp
+++ b/storage/ndb/src/ndbapi/NdbReceiver.cpp
@@ -32,7 +32,7 @@ NdbReceiver::NdbReceiver(Ndb *aNdb) :
 {
   theCurrentRecAttr = theFirstRecAttr = 0;
   m_defined_rows = 0;
-  m_rows = new NdbRecAttr*[0];
+  m_rows = NULL;
 }
 
 NdbReceiver::~NdbReceiver()
@@ -45,19 +45,26 @@ NdbReceiver::~NdbReceiver()
   DBUG_VOID_RETURN;
 }
 
-void
+int
 NdbReceiver::init(ReceiverType type, void* owner)
 {
   theMagicNumber = 0x11223344;
   m_type = type;
   m_owner = owner;
+  theFirstRecAttr = NULL;
+  theCurrentRecAttr = NULL;
   if (m_id == NdbObjectIdMap::InvalidId) {
     if (m_ndb)
+    {
       m_id = m_ndb->theImpl->theNdbObjectIdMap.map(this);
+      if (m_id == NdbObjectIdMap::InvalidId)
+      {
+        setErrorCode(4000);
+        return -1;
+      }
+    }
   }
-
-  theFirstRecAttr = NULL;
-  theCurrentRecAttr = NULL;
+  return 0;
 }
 
 void
@@ -146,7 +153,7 @@ NdbReceiver::calculate_batch_size(Uint32 key_size,
   return;
 }
 
-void
+int
 NdbReceiver::do_get_value(NdbReceiver * org,
                          Uint32 rows,
                          Uint32 key_size,
@@ -154,7 +161,11 @@ NdbReceiver::do_get_value(NdbReceiver * org,
   if(rows > m_defined_rows){
     delete[] m_rows;
     m_defined_rows = rows;
-    m_rows = new NdbRecAttr*[rows + 1];
+    if ((m_rows = new NdbRecAttr*[rows + 1]) == NULL)
+    {
+      setErrorCode(4000);
+      return -1;
+    }
   }
   m_rows[rows] = 0;
 
@@ -174,7 +185,7 @@ NdbReceiver::do_get_value(NdbReceiver * org,
     // Put key-recAttr first on each row
     if(key_size && !getValue(&key, (char*)0)){
       abort();
-      return ; // -1
+      return -1;
     }
 
     if(range_no &&
@@ -193,7 +204,7 @@ NdbReceiver::do_get_value(NdbReceiver * org,
 
     if(tRecAttr){
       abort();
-      return ;// -1;
+      return -1;
     }
 
     // Store first recAttr for each row in m_rows[i]
@@ -205,7 +216,7 @@ NdbReceiver::do_get_value(NdbReceiver * org,
   }
 
   prepareSend();
-  return;
+  return 0;
 }
 
 NdbRecAttr*
@@ -272,7 +283,7 @@ NdbReceiver::execTRANSID_AI(const Uint32* aDataPtr, Uint32 aLength)
   Uint32 tmp = m_received_result_length + aLength;
   m_received_result_length = tmp;
 
-  return (tmp == exp || (exp > TcKeyConf::SimpleReadBit) ? 1 : 0);
+  return (tmp == exp || (exp > TcKeyConf::DirtyReadBit) ? 1 : 0);
 }
 
 int
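init() and do_get_value() now return int and report allocation failure as error 4000 (memory allocation error) instead of assuming new never fails; the NDB API is built without exceptions, so a failed new yields NULL there. Standalone code needs std::nothrow to get the same behaviour — a sketch of the checked-grow pattern with our own names:

    #include <new>       // std::nothrow
    #include <cstddef>

    struct RowBuffer
    {
      void   **m_rows;
      unsigned m_defined_rows;

      // Grow only when more rows are requested than already allocated,
      // mirroring do_get_value(); -1 tells the caller to set error 4000.
      int ensure_rows(unsigned rows)
      {
        if (rows > m_defined_rows)
        {
          delete[] m_rows;
          m_defined_rows = rows;
          if ((m_rows = new (std::nothrow) void*[rows + 1]) == NULL)
            return -1;
        }
        m_rows[rows] = 0;   // terminator slot, as in the patch
        return 0;
      }
    };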
diff --git a/storage/ndb/src/ndbapi/NdbScanFilter.cpp b/storage/ndb/src/ndbapi/NdbScanFilter.cpp
index 2e9e338d5aa..25f74ce71a4 100644
--- a/storage/ndb/src/ndbapi/NdbScanFilter.cpp
+++ b/storage/ndb/src/ndbapi/NdbScanFilter.cpp
@@ -14,11 +14,15 @@
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA */
 
 #include <NdbScanFilter.hpp>
+#include <Ndb.hpp>
 #include <NdbOperation.hpp>
 #include "NdbDictionaryImpl.hpp"
 #include <Vector.hpp>
 #include <NdbOut.hpp>
 #include <Interpreter.hpp>
+#include <signaldata/AttrInfo.hpp>
+#include "NdbApiSignal.hpp"
+#include "NdbUtil.hpp"
 
 #ifdef VM_TRACE
 #include <NdbEnv.h>
@@ -31,6 +35,7 @@
 class NdbScanFilterImpl {
 public:
+  NdbScanFilterImpl() {}
   struct State {
     NdbScanFilter::Group m_group;
     Uint32 m_popCount;
@@ -41,7 +46,9 @@ public:
   int m_label;
   State m_current;
+  Uint32 m_negative;    //used for translating NAND/NOR to AND/OR, equal 0 or 1
   Vector<State> m_stack;
+  Vector<Uint32> m_stack2;    //to store info of m_negative
   NdbOperation * m_operation;
   Uint32 m_latestAttrib;
 
@@ -49,14 +56,37 @@ public:
   int cond_col_const(Interpreter::BinaryCondition, Uint32 attrId,
                      const void * value, Uint32 len);
+
+  bool m_abort_on_too_large;
+
+  NdbOperation::OperationStatus m_initial_op_status;
+  Uint32 m_initial_AI_size;
+  Uint32 m_max_size;
+
+  Uint32 get_size() {
+    assert(m_operation->theTotalCurrAI_Len >= m_initial_AI_size);
+    return m_operation->theTotalCurrAI_Len - m_initial_AI_size;
+  }
+  bool check_size() {
+    if (get_size() <= m_max_size)
+      return true;
+    handle_filter_too_large();
+    return false;
+  }
+  void handle_filter_too_large();
+
+  NdbError m_error;
 };
 
 const Uint32 LabelExit = ~0;
 
-NdbScanFilter::NdbScanFilter(class NdbOperation * op)
+NdbScanFilter::NdbScanFilter(class NdbOperation * op,
+                             bool abort_on_too_large,
+                             Uint32 max_size)
   : m_impl(* new NdbScanFilterImpl())
 {
+  DBUG_ENTER("NdbScanFilter::NdbScanFilter");
   m_impl.m_current.m_group = (NdbScanFilter::Group)0;
   m_impl.m_current.m_popCount = 0;
   m_impl.m_current.m_ownLabel = 0;
@@ -65,6 +95,22 @@ NdbScanFilter::NdbScanFilter(class NdbOperation * op)
   m_impl.m_label = 0;
   m_impl.m_latestAttrib = ~0;
   m_impl.m_operation = op;
+  m_impl.m_negative = 0;
+
+  DBUG_PRINT("info", ("op status: %d tot AI: %u in curr: %u",
+                      op->theStatus,
+                      op->theTotalCurrAI_Len, op->theAI_LenInCurrAI));
+
+  m_impl.m_abort_on_too_large = abort_on_too_large;
+
+  m_impl.m_initial_op_status = op->theStatus;
+  m_impl.m_initial_AI_size = op->theTotalCurrAI_Len;
+  if (max_size > NDB_MAX_SCANFILTER_SIZE_IN_WORDS)
+    max_size = NDB_MAX_SCANFILTER_SIZE_IN_WORDS;
+  m_impl.m_max_size = max_size;
+
+  m_impl.m_error.code = 0;
+  DBUG_VOID_RETURN;
 }
 
 NdbScanFilter::~NdbScanFilter(){
@@ -74,18 +120,43 @@
 int
 NdbScanFilter::begin(Group group){
+  if (m_impl.m_stack2.push_back(m_impl.m_negative))
+  {
+    m_impl.m_operation->setErrorCodeAbort(4000);
+    return -1;
+  }
   switch(group){
   case NdbScanFilter::AND:
     INT_DEBUG(("Begin(AND)"));
+    if(m_impl.m_negative == 1){
+      group = NdbScanFilter::OR;
+    }
    break;
   case NdbScanFilter::OR:
     INT_DEBUG(("Begin(OR)"));
+    if(m_impl.m_negative == 1){
+      group = NdbScanFilter::AND;
+    }
     break;
   case NdbScanFilter::NAND:
     INT_DEBUG(("Begin(NAND)"));
+    if(m_impl.m_negative == 0){
+      group = NdbScanFilter::OR;
+      m_impl.m_negative = 1;
+    }else{
+      group = NdbScanFilter::AND;
+      m_impl.m_negative = 0;
+    }
     break;
   case NdbScanFilter::NOR:
     INT_DEBUG(("Begin(NOR)"));
+    if(m_impl.m_negative == 0){
+      group = NdbScanFilter::AND;
+      m_impl.m_negative = 1;
+    }else{
+      group = NdbScanFilter::OR;
+      m_impl.m_negative = 0;
+    }
     break;
   }
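The switch above rewrites NAND and NOR groups into plain AND/OR using De Morgan's laws — NAND(a,b,...) = OR(NOT a, NOT b, ...) and NOR(a,b,...) = AND(NOT a, NOT b, ...). m_negative records whether we are currently under an odd number of negations, so nested AND/OR swap as well and leaf predicates flip (see isnull()/isnotnull() and cond_col_const() below). The rewrite rule in isolation, with our own enum:

    enum Group { AND, OR, NAND, NOR };

    // Returns the group actually generated; 'negative' mirrors m_negative.
    static Group translate(Group g, unsigned &negative)
    {
      switch (g)
      {
      case AND:  return negative ? OR  : AND;
      case OR:   return negative ? AND : OR;
      case NAND: negative = !negative; return negative ? OR  : AND;
      case NOR:  negative = !negative; return negative ? AND : OR;
      }
      return g;   // not reached
    }

For example, begin(NAND) with negative == 0 yields OR and sets negative, exactly as the NAND case above does.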
@@ -102,7 +173,11 @@ NdbScanFilter::begin(Group group){
   }
 
   NdbScanFilterImpl::State tmp = m_impl.m_current;
-  m_impl.m_stack.push_back(m_impl.m_current);
+  if (m_impl.m_stack.push_back(m_impl.m_current))
+  {
+    m_impl.m_operation->setErrorCodeAbort(4000);
+    return -1;
+  }
   m_impl.m_current.m_group = group;
   m_impl.m_current.m_ownLabel = m_impl.m_label++;
   m_impl.m_current.m_popCount = 0;
@@ -129,6 +204,13 @@
 int
 NdbScanFilter::end(){
+  if(m_impl.m_stack2.size() == 0){
+    m_impl.m_operation->setErrorCodeAbort(4259);
+    return -1;
+  }
+  m_impl.m_negative = m_impl.m_stack2.back();
+  m_impl.m_stack2.erase(m_impl.m_stack2.size() - 1);
+
   switch(m_impl.m_current.m_group){
   case NdbScanFilter::AND:
     INT_DEBUG(("End(AND pc=%d)", m_impl.m_current.m_popCount));
@@ -150,36 +232,48 @@ NdbScanFilter::end(){
   }
 
   NdbScanFilterImpl::State tmp = m_impl.m_current;
+  if(m_impl.m_stack.size() == 0){
+    m_impl.m_operation->setErrorCodeAbort(4259);
+    return -1;
+  }
   m_impl.m_current = m_impl.m_stack.back();
   m_impl.m_stack.erase(m_impl.m_stack.size() - 1);
 
   switch(tmp.m_group){
   case NdbScanFilter::AND:
     if(tmp.m_trueLabel == (Uint32)~0){
-      m_impl.m_operation->interpret_exit_ok();
+      if (m_impl.m_operation->interpret_exit_ok() == -1)
+        return -1;
     } else {
-      m_impl.m_operation->branch_label(tmp.m_trueLabel);
+      if (m_impl.m_operation->branch_label(tmp.m_trueLabel) == -1)
+        return -1;
     }
     break;
   case NdbScanFilter::NAND:
     if(tmp.m_trueLabel == (Uint32)~0){
-      m_impl.m_operation->interpret_exit_nok();
+      if (m_impl.m_operation->interpret_exit_nok() == -1)
+        return -1;
     } else {
-      m_impl.m_operation->branch_label(tmp.m_falseLabel);
+      if (m_impl.m_operation->branch_label(tmp.m_falseLabel) == -1)
+        return -1;
     }
     break;
   case NdbScanFilter::OR:
     if(tmp.m_falseLabel == (Uint32)~0){
-      m_impl.m_operation->interpret_exit_nok();
+      if (m_impl.m_operation->interpret_exit_nok() == -1)
+        return -1;
     } else {
-      m_impl.m_operation->branch_label(tmp.m_falseLabel);
+      if (m_impl.m_operation->branch_label(tmp.m_falseLabel) == -1)
+        return -1;
     }
     break;
   case NdbScanFilter::NOR:
     if(tmp.m_falseLabel == (Uint32)~0){
-      m_impl.m_operation->interpret_exit_ok();
+      if (m_impl.m_operation->interpret_exit_ok() == -1)
+        return -1;
     } else {
-      m_impl.m_operation->branch_label(tmp.m_trueLabel);
+      if (m_impl.m_operation->branch_label(tmp.m_trueLabel) == -1)
+        return -1;
     }
     break;
   default:
@@ -187,24 +281,29 @@ NdbScanFilter::end(){
     return -1;
   }
 
-  m_impl.m_operation->def_label(tmp.m_ownLabel);
+  if (m_impl.m_operation->def_label(tmp.m_ownLabel) == -1)
+    return -1;
 
   if(m_impl.m_stack.size() == 0){
     switch(tmp.m_group){
     case NdbScanFilter::AND:
     case NdbScanFilter::NOR:
-      m_impl.m_operation->interpret_exit_nok();
+      if (m_impl.m_operation->interpret_exit_nok() == -1)
+        return -1;
       break;
     case NdbScanFilter::OR:
     case NdbScanFilter::NAND:
-      m_impl.m_operation->interpret_exit_ok();
+      if (m_impl.m_operation->interpret_exit_ok() == -1)
+        return -1;
      break;
    default:
      m_impl.m_operation->setErrorCodeAbort(4260);
      return -1;
    }
  }
-
+
+  if (!m_impl.check_size())
+    return -1;
   return 0;
 }
 
@@ -217,10 +316,16 @@ NdbScanFilter::istrue(){
   }
 
   if(m_impl.m_current.m_trueLabel == (Uint32)~0){
-    return m_impl.m_operation->interpret_exit_ok();
+    if (m_impl.m_operation->interpret_exit_ok() == -1)
+      return -1;
   } else {
-    return m_impl.m_operation->branch_label(m_impl.m_current.m_trueLabel);
+    if (m_impl.m_operation->branch_label(m_impl.m_current.m_trueLabel) == -1)
+      return -1;
   }
+
+  if (!m_impl.check_size())
+    return -1;
+  return 0;
 }
 
 int
@@ -232,12 +337,22 @@ NdbScanFilter::isfalse(){
   }
 
   if(m_impl.m_current.m_falseLabel == (Uint32)~0){
-    return m_impl.m_operation->interpret_exit_nok();
+    if (m_impl.m_operation->interpret_exit_nok() == -1)
+      return -1;
   } else {
-    return m_impl.m_operation->branch_label(m_impl.m_current.m_falseLabel);
+    if (m_impl.m_operation->branch_label(m_impl.m_current.m_falseLabel) == -1)
+      return -1;
   }
+
+  if (!m_impl.check_size())
+    return -1;
+  return 0;
 }
 
+NdbOperation *
+NdbScanFilter::getNdbOperation(){
+  return m_impl.m_operation;
+}
 
 #define action(x, y, z)
 
@@ -286,18 +401,28 @@ NdbScanFilterImpl::cond_col(Interpreter::UnaryCondition op, Uint32 AttrId){
   }
 
   Branch1 branch = table2[op].m_branches[m_current.m_group];
-  (m_operation->* branch)(AttrId, m_current.m_ownLabel);
+  if ((m_operation->* branch)(AttrId, m_current.m_ownLabel) == -1)
+    return -1;
+
+  if (!check_size())
+    return -1;
   return 0;
 }
 
 int
 NdbScanFilter::isnull(int AttrId){
-  return m_impl.cond_col(Interpreter::IS_NULL, AttrId);
+  if(m_impl.m_negative == 1)
+    return m_impl.cond_col(Interpreter::IS_NOT_NULL, AttrId);
+  else
+    return m_impl.cond_col(Interpreter::IS_NULL, AttrId);
 }
 
 int
 NdbScanFilter::isnotnull(int AttrId){
-  return m_impl.cond_col(Interpreter::IS_NOT_NULL, AttrId);
+  if(m_impl.m_negative == 1)
+    return m_impl.cond_col(Interpreter::IS_NULL, AttrId);
+  else
+    return m_impl.cond_col(Interpreter::IS_NOT_NULL, AttrId);
 }
 
 struct tab3 {
@@ -394,8 +519,17 @@ NdbScanFilterImpl::cond_col_const(Interpreter::BinaryCondition op,
     m_operation->setErrorCodeAbort(4260);
     return -1;
   }
+
+  StrBranch2 branch;
+  if(m_negative == 1){    //change NdbOperation to its negative
+    if(m_current.m_group == NdbScanFilter::AND)
+      branch = table3[op].m_branches[(Uint32)(m_current.m_group) + 1];
+    if(m_current.m_group == NdbScanFilter::OR)
+      branch = table3[op].m_branches[(Uint32)(m_current.m_group) - 1];
+  }else{
+    branch = table3[op].m_branches[(Uint32)(m_current.m_group)];
+  }
 
-  StrBranch2 branch = table3[op].m_branches[m_current.m_group];
   const NdbDictionary::Column * col =
     m_operation->m_currentTable->getColumn(AttrId);
 
@@ -404,8 +538,12 @@ NdbScanFilterImpl::cond_col_const(Interpreter::BinaryCondition op,
     return -1;
   }
 
-  int ret = (m_operation->* branch)(AttrId, value, len, false, m_current.m_ownLabel);
-  return ret;
+  if ((m_operation->* branch)(AttrId, value, len, false, m_current.m_ownLabel) == -1)
+    return -1;
+
+  if (!check_size())
+    return -1;
+  return 0;
 }
 
 int
@@ -431,7 +569,130 @@ NdbScanFilter::cmp(BinaryCondition cond, int ColId,
     return m_impl.cond_col_const(Interpreter::NOT_LIKE, ColId, val, len);
   }
   return -1;
-}
+}
+
+void
+NdbScanFilterImpl::handle_filter_too_large()
+{
+  DBUG_ENTER("NdbScanFilterImpl::handle_filter_too_large");
+
+  NdbOperation* const op = m_operation;
+  m_error.code = NdbScanFilter::FilterTooLarge;
+  if (m_abort_on_too_large)
+    op->setErrorCodeAbort(m_error.code);
+
+  /*
+   * Possible interpreted parts at this point are:
+   *
+   * 1. initial read
+   * 2. interpreted program
+   *
+   * It is assumed that NdbScanFilter has created all of 2
+   * so that we don't have to save interpreter state.
+ */ + + const Uint32 size = get_size(); + assert(size != 0); + + // new ATTRINFO size + const Uint32 new_size = m_initial_AI_size; + + // find last signal for new size + assert(op->theFirstATTRINFO != NULL); + NdbApiSignal* lastSignal = op->theFirstATTRINFO; + Uint32 n = 0; + while (n + AttrInfo::DataLength < new_size) { + lastSignal = lastSignal->next(); + assert(lastSignal != NULL); + n += AttrInfo::DataLength; + } + assert(n < size); + + // release remaining signals + NdbApiSignal* tSignal = lastSignal->next(); + op->theNdb->releaseSignalsInList(&tSignal); + lastSignal->next(NULL); + + // length of lastSignal + const Uint32 new_curr = AttrInfo::HeaderLength + new_size - n; + assert(new_curr <= 25); + + DBUG_PRINT("info", ("op status: %d->%d tot AI: %u->%u in curr: %u->%u", + op->theStatus, m_initial_op_status, + op->theTotalCurrAI_Len, new_size, + op->theAI_LenInCurrAI, new_curr)); + + // reset op state + op->theStatus = m_initial_op_status; + + // reset interpreter state to initial + + NdbBranch* tBranch = op->theFirstBranch; + while (tBranch != NULL) { + NdbBranch* tmp = tBranch; + tBranch = tBranch->theNext; + op->theNdb->releaseNdbBranch(tmp); + } + op->theFirstBranch = NULL; + op->theLastBranch = NULL; + + NdbLabel* tLabel = op->theFirstLabel; + while (tLabel != NULL) { + NdbLabel* tmp = tLabel; + tLabel = tLabel->theNext; + op->theNdb->releaseNdbLabel(tmp); + } + op->theFirstLabel = NULL; + op->theLastLabel = NULL; + + NdbCall* tCall = op->theFirstCall; + while (tCall != NULL) { + NdbCall* tmp = tCall; + tCall = tCall->theNext; + op->theNdb->releaseNdbCall(tmp); + } + op->theFirstCall = NULL; + op->theLastCall = NULL; + + NdbSubroutine* tSubroutine = op->theFirstSubroutine; + while (tSubroutine != NULL) { + NdbSubroutine* tmp = tSubroutine; + tSubroutine = tSubroutine->theNext; + op->theNdb->releaseNdbSubroutine(tmp); + } + op->theFirstSubroutine = NULL; + op->theLastSubroutine = NULL; + + op->theNoOfLabels = 0; + op->theNoOfSubroutines = 0; + + // reset AI size + op->theTotalCurrAI_Len = new_size; + op->theAI_LenInCurrAI = new_curr; + + // reset signal pointers + op->theCurrentATTRINFO = lastSignal; + op->theATTRINFOptr = &lastSignal->getDataPtrSend()[new_curr]; + + // interpreter sizes are set later somewhere + + DBUG_VOID_RETURN; +} + +static void +update(const NdbError & _err){ + NdbError & error = (NdbError &) _err; + ndberror_struct ndberror = (ndberror_struct)error; + ndberror_update(&ndberror); + error = NdbError(ndberror); +} + +const NdbError & +NdbScanFilter::getNdbError() const +{ + update(m_impl.m_error); + return m_impl.m_error; +} #if 0 diff --git a/storage/ndb/src/ndbapi/NdbScanOperation.cpp b/storage/ndb/src/ndbapi/NdbScanOperation.cpp index 6e867df47a2..afbec070ac8 100644 --- a/storage/ndb/src/ndbapi/NdbScanOperation.cpp +++ b/storage/ndb/src/ndbapi/NdbScanOperation.cpp @@ -138,34 +138,9 @@ NdbScanOperation::readTuples(NdbScanOperation::LockMode lm, } theNdbCon->theScanningOp = this; - theLockMode = lm; - - bool lockExcl, lockHoldMode, readCommitted; - switch(lm){ - case NdbScanOperation::LM_Read: - lockExcl = false; - lockHoldMode = true; - readCommitted = false; - break; - case NdbScanOperation::LM_Exclusive: - lockExcl = true; - lockHoldMode = true; - readCommitted = false; - break; - case NdbScanOperation::LM_CommittedRead: - lockExcl = false; - lockHoldMode = false; - readCommitted = true; - break; - default: - setErrorCode(4003); - return -1; - } - - m_keyInfo = ((scan_flags & SF_KeyInfo) || lockExcl) ? 
1 : 0; bool tupScan = (scan_flags & SF_TupScan); -#if 1 // XXX temp for testing +#if 0 // XXX temp for testing { char* p = getenv("NDB_USE_TUPSCAN"); if (p != 0) { unsigned n = atoi(p); // 0-10 @@ -225,13 +200,13 @@ NdbScanOperation::readTuples(NdbScanOperation::LockMode lm, Uint32 reqInfo = 0; ScanTabReq::setParallelism(reqInfo, parallel); ScanTabReq::setScanBatch(reqInfo, 0); - ScanTabReq::setLockMode(reqInfo, lockExcl); - ScanTabReq::setHoldLockFlag(reqInfo, lockHoldMode); - ScanTabReq::setReadCommittedFlag(reqInfo, readCommitted); ScanTabReq::setRangeScanFlag(reqInfo, rangeScan); ScanTabReq::setTupScanFlag(reqInfo, tupScan); req->requestInfo = reqInfo; + m_keyInfo = (scan_flags & SF_KeyInfo) ? 1 : 0; + setReadLockMode(lm); + Uint64 transId = theNdbCon->getTransactionId(); req->transId1 = (Uint32) transId; req->transId2 = (Uint32) (transId >> 32); @@ -251,6 +226,42 @@ NdbScanOperation::readTuples(NdbScanOperation::LockMode lm, return 0; } +void +NdbScanOperation::setReadLockMode(LockMode lockMode) +{ + bool lockExcl, lockHoldMode, readCommitted; + switch (lockMode) + { + case LM_CommittedRead: + lockExcl= false; + lockHoldMode= false; + readCommitted= true; + break; + case LM_SimpleRead: + case LM_Read: + lockExcl= false; + lockHoldMode= true; + readCommitted= false; + break; + case LM_Exclusive: + lockExcl= true; + lockHoldMode= true; + readCommitted= false; + m_keyInfo= 1; + break; + default: + /* Not supported / invalid. */ + assert(false); + } + theLockMode= lockMode; + ScanTabReq *req= CAST_PTR(ScanTabReq, theSCAN_TABREQ->getDataPtrSend()); + Uint32 reqInfo= req->requestInfo; + ScanTabReq::setLockMode(reqInfo, lockExcl); + ScanTabReq::setHoldLockFlag(reqInfo, lockHoldMode); + ScanTabReq::setReadCommittedFlag(reqInfo, readCommitted); + req->requestInfo= reqInfo; +} + int NdbScanOperation::fix_receivers(Uint32 parallel){ assert(parallel > 0); @@ -820,9 +831,12 @@ int NdbScanOperation::prepareSendScan(Uint32 aTC_ConnectPtr, req->requestInfo = reqInfo; for(Uint32 i = 0; i<theParallelism; i++){ - m_receivers[i]->do_get_value(&theReceiver, batch_size, - key_size, - m_read_range_no); + if (m_receivers[i]->do_get_value(&theReceiver, batch_size, + key_size, + m_read_range_no)) + { + return -1; + } } return 0; } @@ -852,7 +866,6 @@ NdbScanOperation::doSendScan(int aProcessorId) tSignal = theSCAN_TABREQ; Uint32 tupKeyLen = theTupKeyLen; - Uint32 len = theTotalNrOfKeyWordInSignal; Uint32 aTC_ConnectPtr = theNdbCon->theTCConPtr; Uint64 transId = theNdbCon->theTransactionId; @@ -860,6 +873,10 @@ NdbScanOperation::doSendScan(int aProcessorId) // sending it. This could not be done in openScan because // we created the ATTRINFO signals after the SCAN_TABREQ signal. 
ScanTabReq * const req = CAST_PTR(ScanTabReq, tSignal->getDataPtrSend()); + if (unlikely(theTotalCurrAI_Len > ScanTabReq::MaxTotalAttrInfo)) { + setErrorCode(4257); + return -1; + } req->attrLenKeyLen = (tupKeyLen << 16) | theTotalCurrAI_Len; Uint32 tmp = req->requestInfo; ScanTabReq::setDistributionKeyFlag(tmp, theDistrKeyIndicator_); @@ -995,6 +1012,7 @@ NdbScanOperation::takeOverScanOp(OperationType opType, NdbTransaction* pTrans) newOp->theTupKeyLen = len; newOp->theOperationType = opType; + newOp->m_abortOption = AbortOnError; switch (opType) { case (ReadRequest): newOp->theLockMode = theLockMode; @@ -1189,7 +1207,7 @@ NdbIndexScanOperation::setBound(const NdbColumnImpl* tAttrInfo, Uint32 tupKeyLen = theTupKeyLen; union { Uint32 tempData[2000]; - Uint64 __align; + Uint64 __my_align; }; Uint64 *valPtr; if(remaining > totalLen && aligned && nobytes){ @@ -1225,7 +1243,7 @@ NdbIndexScanOperation::setBound(const NdbColumnImpl* tAttrInfo, * so it's safe to use [tIndexAttrId] * (instead of looping as is NdbOperation::equal_impl) */ - if(type == BoundEQ && tDistrKey) + if(type == BoundEQ && tDistrKey && !m_multi_range) { theNoOfTupKeyLeft--; return handle_distribution_key(valPtr, sizeInWords); @@ -1298,7 +1316,7 @@ NdbIndexScanOperation::getKeyFromSCANTABREQ(Uint32* data, Uint32 size) } pos += rem; } - DBUG_DUMP("key", (char*)data, size << 2); + DBUG_DUMP("key", (uchar*) data, size << 2); DBUG_RETURN(size); } @@ -1311,7 +1329,8 @@ NdbIndexScanOperation::readTuples(LockMode lm, const bool order_by = scan_flags & SF_OrderBy; const bool order_desc = scan_flags & SF_Descending; const bool read_range_no = scan_flags & SF_ReadRangeNo; - + m_multi_range = scan_flags & SF_MultiRange; + int res = NdbScanOperation::readTuples(lm, scan_flags, parallel, batch); if(!res && read_range_no) { @@ -1361,8 +1380,6 @@ NdbIndexScanOperation::fix_get_values(){ Uint32 cnt = m_accessTable->getNoOfColumns() - 1; assert(cnt < NDB_MAX_NO_OF_ATTRIBUTES_IN_KEY); - const NdbIndexImpl * idx = m_accessTable->m_index; - const NdbTableImpl * tab = m_currentTable; for(Uint32 i = 0; i<cnt; i++){ Uint32 val = theTupleKeyDefined[i][0]; switch(val){ @@ -1794,6 +1811,12 @@ NdbIndexScanOperation::reset_bounds(bool forceSend){ int NdbIndexScanOperation::end_of_bound(Uint32 no) { + DBUG_ENTER("end_of_bound"); + DBUG_PRINT("info", ("Range number %u", no)); + /* Check that SF_MultiRange has been specified if more + than one range is specified */ + if (no > 0 && !m_multi_range) + DBUG_RETURN(-1); if(no < (1 << 13)) // Only 12-bits no of ranges { Uint32 bound_head = * m_first_bound_word; @@ -1802,9 +1825,9 @@ NdbIndexScanOperation::end_of_bound(Uint32 no) m_first_bound_word = theKEYINFOptr + theTotalNrOfKeyWordInSignal;; m_this_bound_start = theTupKeyLen; - return 0; + DBUG_RETURN(0); } - return -1; + DBUG_RETURN(-1); } int diff --git a/storage/ndb/src/ndbapi/NdbTransaction.cpp b/storage/ndb/src/ndbapi/NdbTransaction.cpp index 1930c9af1d5..bc59df722aa 100644 --- a/storage/ndb/src/ndbapi/NdbTransaction.cpp +++ b/storage/ndb/src/ndbapi/NdbTransaction.cpp @@ -56,7 +56,6 @@ NdbTransaction::NdbTransaction( Ndb* aNdb ) : theCompletedLastOp(NULL), theNoOfOpSent(0), theNoOfOpCompleted(0), - theNoOfOpFetched(0), theMyRef(0), theTCConPtr(0), theTransactionId(0), @@ -82,6 +81,7 @@ NdbTransaction::NdbTransaction( Ndb* aNdb ) : { theListState = NotInList; theError.code = 0; + //theId = NdbObjectIdMap::InvalidId; theId = theNdb->theImpl->theNdbObjectIdMap.map(this); #define CHECK_SZ(mask, sz) assert((sizeof(mask)/sizeof(mask[0])) == sz) @@ -107,7 
+107,7 @@ void init();
 Remark:         Initialise connection object for new transaction.
*****************************************************************************/
-void
+int
 NdbTransaction::init()
 {
   theListState = NotInList;
@@ -131,7 +131,6 @@ NdbTransaction::init()
     theNdb->theImpl->m_ndb_cluster_connection.get_latest_trans_gci();
   theCommitStatus = Started;
   theCompletionStatus = NotCompleted;
-  m_abortOption = AbortOnError;
 
   theError.code = 0;
   theErrorLine = 0;
@@ -150,6 +149,17 @@ NdbTransaction::init()
   //
   theBlobFlag = false;
   thePendingBlobOps = 0;
+  if (theId == NdbObjectIdMap::InvalidId)
+  {
+    theId = theNdb->theImpl->theNdbObjectIdMap.map(this);
+    if (theId == NdbObjectIdMap::InvalidId)
+    {
+      theError.code = 4000;
+      return -1;
+    }
+  }
+  return 0;
+
 }//NdbTransaction::init()
 
/*****************************************************************************
@@ -176,12 +186,9 @@ void
 NdbTransaction::setOperationErrorCodeAbort(int error, int abortOption)
 {
   DBUG_ENTER("NdbTransaction::setOperationErrorCodeAbort");
-  if (abortOption == -1)
-    abortOption = m_abortOption;
   if (theTransactionIsStarted == false) {
     theCommitStatus = Aborted;
-  } else if ((abortOption == AbortOnError) &&
-	     (theCommitStatus != Committed) &&
+  } else if ((theCommitStatus != Committed) &&
             (theCommitStatus != Aborted)) {
     theCommitStatus = NeedAbort;
   }//if
@@ -263,8 +270,8 @@ Remark:         Initialise connection object for new transaction.
*****************************************************************************/
 int
 NdbTransaction::execute(ExecType aTypeOfExec,
-                        AbortOption abortOption,
-                        int forceSend)
+                        NdbOperation::AbortOption abortOption,
+                        int forceSend)
 {
   NdbError savedError= theError;
   DBUG_ENTER("NdbTransaction::execute");
@@ -354,40 +361,14 @@ NdbTransaction::execute(ExecType aTypeOfExec,
       theCompletedLastOp = NULL;
     }
 
-    if (executeNoBlobs(tExecType, abortOption, forceSend) == -1)
+    if (executeNoBlobs(tExecType,
+                       NdbOperation::DefaultAbortOption,
+                       forceSend) == -1)
     {
-      ret = -1;
       if(savedError.code==0)
         savedError= theError;
 
-      /**
-       * If AO_IgnoreError, error codes arent always set on individual
-       * operations, making postExecute impossible
-       */
-      if (abortOption == AO_IgnoreError)
-      {
-        if (theCompletedFirstOp != NULL)
-        {
-          if (tCompletedFirstOp != NULL)
-          {
-            tCompletedLastOp->next(theCompletedFirstOp);
-            theCompletedFirstOp = tCompletedFirstOp;
-          }
-        }
-        else
-        {
-          theCompletedFirstOp = tCompletedFirstOp;
-          theCompletedLastOp = tCompletedLastOp;
-        }
-        if (tPrepOp != NULL && tRestOp != NULL) {
-          if (theFirstOpInList == NULL)
-            theFirstOpInList = tRestOp;
-          else
-            theLastOpInList->next(tRestOp);
-          theLastOpInList = tLastOp;
-        }
-        DBUG_RETURN(-1);
-      }
+      DBUG_RETURN(-1);
     }
 
 #ifdef ndb_api_crash_on_complex_blob_abort
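As the hunk above shows, the per-transaction m_abortOption is gone: AbortOption is now NdbOperation's enum, carried per operation rather than per execute() call. A hedged sketch of the calling convention after this change (the table handle, key and the handle() helper are invented for the example; error paths trimmed):

    NdbTransaction *trans = ndb->startTransaction();
    NdbOperation *op = trans->getNdbOperation(tab);  // tab: const NdbDictionary::Table*
    op->readTuple(NdbOperation::LM_Read);
    op->equal("pk", 42);
    NdbRecAttr *val = op->getValue("val");

    // AbortOption now lives on NdbOperation; AO_IgnoreError keeps the
    // transaction alive when an individual operation fails.
    if (trans->execute(NdbTransaction::Commit,
                       NdbOperation::AO_IgnoreError) == -1)
      handle(trans->getNdbError());
    ndb->closeTransaction(trans);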
@@ -447,9 +428,9 @@ NdbTransaction::execute(ExecType aTypeOfExec,
 }
 
 int
-NdbTransaction::executeNoBlobs(ExecType aTypeOfExec,
-                               AbortOption abortOption,
-                               int forceSend)
+NdbTransaction::executeNoBlobs(NdbTransaction::ExecType aTypeOfExec,
                               NdbOperation::AbortOption abortOption,
+                               int forceSend)
 {
   DBUG_ENTER("NdbTransaction::executeNoBlobs");
   DBUG_PRINT("enter", ("aTypeOfExec: %d, abortOption: %d",
@@ -472,12 +453,27 @@ NdbTransaction::executeNoBlobs(ExecType aTypeOfExec,
     while (1) {
       int noOfComp = tNdb->sendPollNdb(3 * timeout, 1, forceSend);
       if (noOfComp == 0) {
-        /**
-         * This timeout situation can occur if NDB crashes.
+        /*
+         * Just for fun, this is only one of two places where we could
+         * hit this error... It's quite possible we hit it in
+         * Ndbif.cpp in Ndb::check_send_timeout()
+         *
+         * We behave rather similarly in both places.
+         * Hitting this is certainly a bug though...
          */
-        ndbout << "This timeout should never occur, execute(..)" << endl;
-        theError.code = 4012;
-        setOperationErrorCodeAbort(4012); // Error code for "Cluster Failure"
+        g_eventLogger.error("WARNING: Timeout in executeNoBlobs() waiting for "
+                            "response from NDB data nodes. This should NEVER "
+                            "occur. You have likely hit an NDB bug. Please "
+                            "file a bug.");
+        DBUG_PRINT("error",("This timeout should never occur, execute()"));
+        g_eventLogger.error("Forcibly trying to rollback txn (%p"
+                            ") to try to clean up data node resources.",
+                            this);
+        executeNoBlobs(NdbTransaction::Rollback);
+        theError.code = 4012;
+        theError.status= NdbError::PermanentError;
+        theError.classification= NdbError::TimeoutExpired;
+        setOperationErrorCodeAbort(4012); // ndbd timeout
         DBUG_RETURN(-1);
       }//if
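executeAsynchPrepare()/executeAsynch() below get the same treatment, taking NdbOperation::AbortOption and handing it to each operation's prepareSend() instead of storing it on the transaction. A minimal asynchronous round trip under the same caveats (callback shape per NdbAsynchCallback; the poll values are arbitrary, and we assume the header supplies a default for the trailing forceSend argument):

    static void on_done(int result, NdbTransaction *trans, void *cookie)
    {
      // result < 0: transaction failed; inspect trans->getNdbError()
    }

    void run_async(Ndb *ndb, NdbTransaction *trans)
    {
      trans->executeAsynch(NdbTransaction::Commit, on_done, NULL,
                           NdbOperation::DefaultAbortOption);
      ndb->sendPollNdb(3000, 1, 0);  // wait up to 3 s for 1 completion
    }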
@@ -527,10 +523,10 @@ Parameters :    aTypeOfExec: Type of execute.
 Remark:         Prepare a part of a transaction in an asynchronous manner.
*****************************************************************************/
 void
-NdbTransaction::executeAsynchPrepare( ExecType aTypeOfExec,
+NdbTransaction::executeAsynchPrepare(NdbTransaction::ExecType aTypeOfExec,
                                       NdbAsynchCallback aCallback,
                                       void* anyObject,
-                                      AbortOption abortOption)
+                                      NdbOperation::AbortOption abortOption)
 {
   DBUG_ENTER("NdbTransaction::executeAsynchPrepare");
   DBUG_PRINT("enter", ("aTypeOfExec: %d, aCallback: 0x%lx, anyObject: Ox%lx",
@@ -541,7 +537,12 @@ NdbTransaction::executeAsynchPrepare( ExecType aTypeOfExec,
   */
  if (theError.code != 0)
    DBUG_PRINT("enter", ("Resetting error %d on execute", theError.code));
-  theError.code = 0;
+  /**
+   * For timeout (4012) we want sendROLLBACK to behave differently;
+   * otherwise, reset the error code as normal.
+   */
+  if (theError.code != 4012)
+    theError.code = 0;
 
  NdbScanOperation* tcOp = m_theFirstScanOperation;
  if (tcOp != 0){
    // Execute any cursor operations
@@ -570,7 +571,6 @@ NdbTransaction::executeAsynchPrepare( ExecType aTypeOfExec,
   theReturnStatus = ReturnSuccess;
   theCallbackFunction = aCallback;
   theCallbackObject = anyObject;
-  m_abortOption = abortOption;
   m_waitForReply = true;
   tNdb->thePreparedTransactionsArray[tnoOfPreparedTransactions] = this;
   theTransArrayIndex = tnoOfPreparedTransactions;
@@ -665,8 +665,7 @@ NdbTransaction::executeAsynchPrepare( ExecType aTypeOfExec,
     while (tOp) {
       int tReturnCode;
       NdbOperation* tNextOp = tOp->next();
-
-      tReturnCode = tOp->prepareSend(theTCConPtr, theTransactionId);
+      tReturnCode = tOp->prepareSend(theTCConPtr, theTransactionId, abortOption);
       if (tReturnCode == -1) {
         theSendStatus = sendABORTfail;
         DBUG_VOID_RETURN;
@@ -701,7 +700,7 @@ void
 NdbTransaction::executeAsynch(ExecType aTypeOfExec,
                               NdbAsynchCallback aCallback,
                               void* anyObject,
-                              AbortOption abortOption,
+                              NdbOperation::AbortOption abortOption,
                               int forceSend)
 {
   executeAsynchPrepare(aTypeOfExec, aCallback, anyObject, abortOption);
@@ -864,6 +863,12 @@ NdbTransaction::sendROLLBACK()      // Send a TCROLLBACKREQ signal;
     tSignal.setData(theTCConPtr, 1);
     tSignal.setData(tTransId1, 2);
     tSignal.setData(tTransId2, 3);
+    if(theError.code == 4012)
+    {
+      g_eventLogger.error("Sending TCROLLBACKREQ with Bad flag");
+      tSignal.setLength(tSignal.getLength() + 1); // + flags
+      tSignal.setData(0x1, 4); // potentially bad data
+    }
     tReturnCode = tp->sendSignal(&tSignal,theDBnode);
     if (tReturnCode != -1) {
       theSendStatus = sendTC_ROLLBACK;
@@ -1782,8 +1787,8 @@ from other transactions.
     const Uint32 tAttrInfoLen = *tPtr++;
     if (tOp && tOp->checkMagicNumber()) {
       Uint32 done = tOp->execTCOPCONF(tAttrInfoLen);
-      if(tAttrInfoLen > TcKeyConf::SimpleReadBit){
-        Uint32 node = tAttrInfoLen & (~TcKeyConf::SimpleReadBit);
+      if(tAttrInfoLen > TcKeyConf::DirtyReadBit){
+        Uint32 node = tAttrInfoLen & (~TcKeyConf::DirtyReadBit);
         NdbNodeBitmask::set(m_db_nodes, node);
         if(NdbNodeBitmask::get(m_failed_db_nodes, node) && !done)
         {
@@ -1810,14 +1815,8 @@ from other transactions.
     }
   } else if ((tNoComp >= tNoSent) &&
              (theLastExecOpInList->theCommitIndicator == 1)){
-
-
-    if (m_abortOption == AO_IgnoreError && theError.code != 0){
-      /**
-       * There's always a TCKEYCONF when using IgnoreError
-       */
-      return -1;
-    }
+
+
/**********************************************************************/
//  We sent the transaction with Commit flag set and received a CONF with
//  no Commit flag set. This is clearly an anomaly.
@@ -1991,13 +1990,6 @@ NdbTransaction::receiveTCINDXCONF(const TcIndxConf * indxConf,
   } else if ((tNoComp >= tNoSent) &&
              (theLastExecOpInList->theCommitIndicator == 1)){
 
-    if (m_abortOption == AO_IgnoreError && theError.code != 0){
-      /**
-       * There's always a TCKEYCONF when using IgnoreError
-       */
-      return -1;
-    }
-
/**********************************************************************/
//  We sent the transaction with Commit flag set and received a CONF with
//  no Commit flag set. This is clearly an anomaly.
@@ -2021,41 +2013,6 @@ NdbTransaction::receiveTCINDXCONF(const TcIndxConf * indxConf,
   return -1;
 }//NdbTransaction::receiveTCINDXCONF()
 
-/*****************************************************************************
-int receiveTCINDXREF( NdbApiSignal* aSignal)
-
-Return Value:  Return 0 : send was succesful.
-               Return -1: In all other case.
-Parameters: aSignal: the signal object that contains the - TCINDXREF signal from TC. -Remark: Handles the reception of the TCINDXREF signal. -*****************************************************************************/ -int -NdbTransaction::receiveTCINDXREF( NdbApiSignal* aSignal) -{ - if(checkState_TransId(aSignal->getDataPtr()+1)){ - theError.code = aSignal->readData(4); // Override any previous errors - - /**********************************************************************/ - /* A serious error has occured. This could be due to deadlock or */ - /* lack of resources or simply a programming error in NDB. This */ - /* transaction will be aborted. Actually it has already been */ - /* and we only need to report completion and return with the */ - /* error code to the application. */ - /**********************************************************************/ - theCompletionStatus = NdbTransaction::CompletedFailure; - theCommitStatus = NdbTransaction::Aborted; - theReturnStatus = NdbTransaction::ReturnFailure; - return 0; - } else { -#ifdef NDB_NO_DROPPED_SIGNAL - abort(); -#endif - } - - return -1; -}//NdbTransaction::receiveTCINDXREF() - /******************************************************************************* int OpCompletedFailure(); @@ -2065,36 +2022,15 @@ Parameters: aErrorCode: The error code. Remark: An operation was completed with failure. *******************************************************************************/ int -NdbTransaction::OpCompleteFailure(Uint8 abortOption, bool setFailure) +NdbTransaction::OpCompleteFailure(NdbOperation* op) { Uint32 tNoComp = theNoOfOpCompleted; Uint32 tNoSent = theNoOfOpSent; - if (setFailure) - theCompletionStatus = NdbTransaction::CompletedFailure; + tNoComp++; theNoOfOpCompleted = tNoComp; - if (tNoComp == tNoSent) { - //------------------------------------------------------------------------ - //If the transaction consists of only simple reads we can set - //Commit state Aborted. Otherwise this simple operation cannot - //decide the success of the whole transaction since a simple - //operation is not really part of that transaction. - //------------------------------------------------------------------------ - if (abortOption == AO_IgnoreError){ - /** - * There's always a TCKEYCONF when using IgnoreError - */ - return -1; - } - - return 0; // Last operation received - } else if (tNoComp > tNoSent) { - setOperationErrorCodeAbort(4113); // Too many operations, - // stop waiting for more - return 0; - } else { - return -1; // Continue waiting for more signals - }//if + + return (tNoComp == tNoSent) ? 
0 : -1; }//NdbTransaction::OpCompleteFailure() /****************************************************************************** @@ -2246,7 +2182,7 @@ NdbTransaction::report_node_failure(Uint32 id){ * 4) X X */ NdbOperation* tmp = theFirstExecOpInList; - const Uint32 len = TcKeyConf::SimpleReadBit | id; + const Uint32 len = TcKeyConf::DirtyReadBit | id; Uint32 tNoComp = theNoOfOpCompleted; Uint32 tNoSent = theNoOfOpSent; Uint32 count = 0; diff --git a/storage/ndb/src/ndbapi/Ndbif.cpp b/storage/ndb/src/ndbapi/Ndbif.cpp index fba40659ec7..aa50a87dea8 100644 --- a/storage/ndb/src/ndbapi/Ndbif.cpp +++ b/storage/ndb/src/ndbapi/Ndbif.cpp @@ -940,8 +940,9 @@ Ndb::handleReceivedSignal(NdbApiSignal* aSignal, LinearSectionPtr ptr[3]) InvalidSignal: #ifdef VM_TRACE ndbout_c("Ndbif: Error Ndb::handleReceivedSignal " - "(GSN=%d, theImpl->theWaiter.m_state=%d)" + "(tFirstDataPtr=%p, GSN=%d, theImpl->theWaiter.m_state=%d)" " sender = (Block: %d Node: %d)", + tFirstDataPtr, tSignalNumber, tWaitState, refToBlock(aSignal->theSendersBlockRef), diff --git a/storage/ndb/src/ndbapi/Ndblist.cpp b/storage/ndb/src/ndbapi/Ndblist.cpp index 6a8fabbcb30..a9e9124edd0 100644 --- a/storage/ndb/src/ndbapi/Ndblist.cpp +++ b/storage/ndb/src/ndbapi/Ndblist.cpp @@ -74,7 +74,10 @@ Ndb::checkFailedNode() int Ndb::createConIdleList(int aNrOfCon) { - theImpl->theConIdleList.fill(this, aNrOfCon); + if (theImpl->theConIdleList.fill(this, aNrOfCon)) + { + return -1; + } return aNrOfCon; } @@ -90,7 +93,10 @@ Ndb::createConIdleList(int aNrOfCon) int Ndb::createOpIdleList(int aNrOfOp) { - theImpl->theOpIdleList.fill(this, aNrOfOp); + if (theImpl->theOpIdleList.fill(this, aNrOfOp)) + { + return -1; + } return aNrOfOp; } diff --git a/storage/ndb/src/ndbapi/ObjectMap.cpp b/storage/ndb/src/ndbapi/ObjectMap.cpp new file mode 100644 index 00000000000..c87911a10d4 --- /dev/null +++ b/storage/ndb/src/ndbapi/ObjectMap.cpp @@ -0,0 +1,62 @@ +/* Copyright (C) 2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#include "ObjectMap.hpp" + +NdbObjectIdMap::NdbObjectIdMap(NdbMutex* mutex, Uint32 sz, Uint32 eSz) +{ + m_size = 0; + m_firstFree = InvalidId; + m_map = 0; + m_mutex = mutex; + m_expandSize = eSz; + expand(sz); +#ifdef DEBUG_OBJECTMAP + ndbout_c("NdbObjectIdMap:::NdbObjectIdMap(%u)", sz); +#endif +} + +NdbObjectIdMap::~NdbObjectIdMap() +{ + free(m_map); +} + +int NdbObjectIdMap::expand(Uint32 incSize) +{ + NdbMutex_Lock(m_mutex); + Uint32 newSize = m_size + incSize; + MapEntry * tmp = (MapEntry*)realloc(m_map, newSize * sizeof(MapEntry)); + + if (likely(tmp != 0)) + { + m_map = tmp; + + for(Uint32 i = m_size; i < newSize; i++){ + m_map[i].m_next = i + 1; + } + m_firstFree = m_size; + m_map[newSize-1].m_next = InvalidId; + m_size = newSize; + } + else + { + NdbMutex_Unlock(m_mutex); + g_eventLogger.error("NdbObjectIdMap::expand: realloc(%u*%u) failed", + newSize, sizeof(MapEntry)); + return -1; + } + NdbMutex_Unlock(m_mutex); + return 0; +} diff --git a/storage/ndb/src/ndbapi/ObjectMap.hpp b/storage/ndb/src/ndbapi/ObjectMap.hpp index 13f9be66c24..bc171649840 100644 --- a/storage/ndb/src/ndbapi/ObjectMap.hpp +++ b/storage/ndb/src/ndbapi/ObjectMap.hpp @@ -20,6 +20,9 @@ //#include <NdbMutex.h> #include <NdbOut.hpp> +#include <EventLogger.hpp> +extern EventLogger g_eventLogger; + //#define DEBUG_OBJECTMAP /** @@ -46,36 +49,17 @@ private: } * m_map; NdbMutex * m_mutex; - void expand(Uint32 newSize); + int expand(Uint32 newSize); }; inline -NdbObjectIdMap::NdbObjectIdMap(NdbMutex* mutex, Uint32 sz, Uint32 eSz) { - m_size = 0; - m_firstFree = InvalidId; - m_map = 0; - m_mutex = mutex; - m_expandSize = eSz; - expand(sz); -#ifdef DEBUG_OBJECTMAP - ndbout_c("NdbObjectIdMap:::NdbObjectIdMap(%u)", sz); -#endif -} - -inline -NdbObjectIdMap::~NdbObjectIdMap(){ - free(m_map); -} - -inline Uint32 NdbObjectIdMap::map(void * object){ // lock(); - if(m_firstFree == InvalidId){ - expand(m_expandSize); - } + if(m_firstFree == InvalidId && expand(m_expandSize)) + return InvalidId; Uint32 ff = m_firstFree; m_firstFree = m_map[ff].m_next; @@ -101,8 +85,8 @@ NdbObjectIdMap::unmap(Uint32 id, void *object){ m_map[i].m_next = m_firstFree; m_firstFree = i; } else { - ndbout_c("Error: NdbObjectIdMap::::unmap(%u, 0x%lx) obj=0x%lx", - id, (long) object, (long) obj); + g_eventLogger.error("NdbObjectIdMap::unmap(%u, 0x%x) obj=0x%x", + id, (long) object, (long) obj); DBUG_PRINT("error",("NdbObjectIdMap::unmap(%u, 0x%lx) obj=0x%lx", id, (long) object, (long) obj)); return 0; @@ -126,29 +110,4 @@ NdbObjectIdMap::getObject(Uint32 id){ } return 0; } - -inline void -NdbObjectIdMap::expand(Uint32 incSize){ - NdbMutex_Lock(m_mutex); - Uint32 newSize = m_size + incSize; - MapEntry * tmp = (MapEntry*)realloc(m_map, newSize * sizeof(MapEntry)); - - if (likely(tmp != 0)) - { - m_map = tmp; - - for(Uint32 i = m_size; i<newSize; i++){ - m_map[i].m_next = i + 1; - } - m_firstFree = m_size; - m_map[newSize-1].m_next = InvalidId; - m_size = newSize; - } - else - { - ndbout_c("NdbObjectIdMap::expand unable to expand!!"); - } - NdbMutex_Unlock(m_mutex); -} - #endif diff --git a/storage/ndb/src/ndbapi/SignalSender.cpp b/storage/ndb/src/ndbapi/SignalSender.cpp index 7f1958e9b1f..70e65200942 100644 --- a/storage/ndb/src/ndbapi/SignalSender.cpp +++ b/storage/ndb/src/ndbapi/SignalSender.cpp @@ -19,13 +19,6 @@ #include 
<signaldata/NFCompleteRep.hpp> #include <signaldata/NodeFailRep.hpp> -static -void -require(bool x) -{ - if (!x) - abort(); -} SimpleSignal::SimpleSignal(bool dealloc){ memset(this, 0, sizeof(* this)); @@ -144,7 +137,10 @@ SignalSender::waitFor(Uint32 timeOutMillis, T & t) { SimpleSignal * s = t.check(m_jobBuffer); if(s != 0){ - m_usedBuffer.push_back(s); + if (m_usedBuffer.push_back(s)) + { + return 0; + } return s; } @@ -159,7 +155,10 @@ SignalSender::waitFor(Uint32 timeOutMillis, T & t) SimpleSignal * s = t.check(m_jobBuffer); if(s != 0){ - m_usedBuffer.push_back(s); + if (m_usedBuffer.push_back(s)) + { + return 0; + } return s; } @@ -172,6 +171,7 @@ SignalSender::waitFor(Uint32 timeOutMillis, T & t) class WaitForAny { public: + WaitForAny() {} SimpleSignal * check(Vector<SimpleSignal*> & m_jobBuffer){ if(m_jobBuffer.size() > 0){ SimpleSignal * s = m_jobBuffer[0]; @@ -191,6 +191,7 @@ SignalSender::waitFor(Uint32 timeOutMillis){ class WaitForNode { public: + WaitForNode() {} Uint32 m_nodeId; SimpleSignal * check(Vector<SimpleSignal*> & m_jobBuffer){ Uint32 len = m_jobBuffer.size(); diff --git a/storage/ndb/src/ndbapi/TransporterFacade.cpp b/storage/ndb/src/ndbapi/TransporterFacade.cpp index 19b384d8dc2..eabfc6bc371 100644 --- a/storage/ndb/src/ndbapi/TransporterFacade.cpp +++ b/storage/ndb/src/ndbapi/TransporterFacade.cpp @@ -1379,7 +1379,7 @@ int PollGuard::wait_scan(int wait_time, NodeId nodeId, bool forceSend) int PollGuard::wait_for_input_in_loop(int wait_time, bool forceSend) { - int ret_val, response_time; + int ret_val; if (forceSend) m_tp->forceSend(m_block_no); else @@ -1403,6 +1403,9 @@ int PollGuard::wait_for_input_in_loop(int wait_time, bool forceSend) } if (wait_time == -1) { +#ifdef NOT_USED + ndbout << "Waited WAITFOR_RESPONSE_TIMEOUT, continuing wait" << endl; +#endif continue; } wait_time= max_time - NdbTick_CurrentMillisecond(); @@ -1438,7 +1441,7 @@ void PollGuard::wait_for_input(int wait_time) queue if it hasn't happened already. It is usually already out of the queue but at time-out it could be that the object is still there. 
*/ - Uint32 cond_wait_index= m_tp->put_in_cond_wait_queue(m_waiter); + (void) m_tp->put_in_cond_wait_queue(m_waiter); m_waiter->wait(wait_time); if (m_waiter->get_cond_wait_index() != TransporterFacade::MAX_NO_THREADS) { diff --git a/storage/ndb/src/ndbapi/TransporterFacade.hpp b/storage/ndb/src/ndbapi/TransporterFacade.hpp index d19974f8999..23fea8792f7 100644 --- a/storage/ndb/src/ndbapi/TransporterFacade.hpp +++ b/storage/ndb/src/ndbapi/TransporterFacade.hpp @@ -365,7 +365,8 @@ inline bool TransporterFacade::get_node_stopping(NodeId n) const { const ClusterMgr::Node & node = theClusterMgr->getNodeInfo(n); - return ((node.m_state.startLevel == NodeState::SL_STOPPING_1) || + return (!node.m_state.getSingleUserMode() && + (node.m_state.startLevel == NodeState::SL_STOPPING_1) || (node.m_state.startLevel == NodeState::SL_STOPPING_2)); } @@ -376,16 +377,9 @@ TransporterFacade::getIsNodeSendable(NodeId n) const { const Uint32 startLevel = node.m_state.startLevel; if (node.m_info.m_type == NodeInfo::DB) { - if(node.m_state.singleUserMode && - ownId() == node.m_state.singleUserApi) { - return (node.compatible && - (node.m_state.startLevel == NodeState::SL_STOPPING_1 || - node.m_state.startLevel == NodeState::SL_STARTED || - node.m_state.startLevel == NodeState::SL_SINGLEUSER)); - } - else - return node.compatible && (startLevel == NodeState::SL_STARTED || - startLevel == NodeState::SL_STOPPING_1); + return node.compatible && (startLevel == NodeState::SL_STARTED || + startLevel == NodeState::SL_STOPPING_1 || + node.m_state.getSingleUserMode()); } else { ndbout_c("TransporterFacade::getIsNodeSendable: Illegal node type: " "%d of node: %d", diff --git a/storage/ndb/src/ndbapi/ndb_cluster_connection.cpp b/storage/ndb/src/ndbapi/ndb_cluster_connection.cpp index 66e3fe92e81..38744fbdeba 100644 --- a/storage/ndb/src/ndbapi/ndb_cluster_connection.cpp +++ b/storage/ndb/src/ndbapi/ndb_cluster_connection.cpp @@ -39,6 +39,8 @@ EventLogger g_eventLogger; NdbMutex *ndb_print_state_mutex= NULL; #endif +static int g_ndb_connection_count = 0; + /* * Ndb_cluster_connection */ @@ -326,6 +328,8 @@ Ndb_cluster_connection_impl::Ndb_cluster_connection_impl(const char * NdbColumnImpl::create_pseudo("NDB$ROWID"); NdbDictionary::Column::ROW_GCI= NdbColumnImpl::create_pseudo("NDB$ROW_GCI"); + NdbDictionary::Column::ANY_VALUE= + NdbColumnImpl::create_pseudo("NDB$ANY_VALUE"); NdbDictionary::Column::COPY_ROWID= NdbColumnImpl::create_pseudo("NDB$COPY_ROWID"); } @@ -382,6 +386,7 @@ Ndb_cluster_connection_impl::~Ndb_cluster_connection_impl() delete NdbDictionary::Column::RECORDS_IN_RANGE; delete NdbDictionary::Column::ROWID; delete NdbDictionary::Column::ROW_GCI; + delete NdbDictionary::Column::ANY_VALUE; NdbDictionary::Column::FRAGMENT= 0; NdbDictionary::Column::FRAGMENT_FIXED_MEMORY= 0; NdbDictionary::Column::FRAGMENT_VARSIZED_MEMORY= 0; @@ -393,6 +398,7 @@ Ndb_cluster_connection_impl::~Ndb_cluster_connection_impl() NdbDictionary::Column::RECORDS_IN_RANGE= 0; NdbDictionary::Column::ROWID= 0; NdbDictionary::Column::ROW_GCI= 0; + NdbDictionary::Column::ANY_VALUE= 0; delete NdbDictionary::Column::COPY_ROWID; NdbDictionary::Column::COPY_ROWID = 0; @@ -418,7 +424,7 @@ Ndb_cluster_connection_impl::set_name(const char *name) } } -void +int Ndb_cluster_connection_impl::init_nodes_vector(Uint32 nodeid, const ndb_mgm_configuration &config) @@ -464,7 +470,10 @@ Ndb_cluster_connection_impl::init_nodes_vector(Uint32 nodeid, break; } } - m_impl.m_all_nodes.push_back(Node(group,remoteNodeId)); + if 
(m_impl.m_all_nodes.push_back(Node(group,remoteNodeId))) + { + DBUG_RETURN(-1); + } DBUG_PRINT("info",("saved %d %d", group,remoteNodeId)); for (int i= m_impl.m_all_nodes.size()-2; i >= 0 && m_impl.m_all_nodes[i].group > m_impl.m_all_nodes[i+1].group; @@ -511,7 +520,7 @@ Ndb_cluster_connection_impl::init_nodes_vector(Uint32 nodeid, do_test(); #endif - DBUG_VOID_RETURN; + DBUG_RETURN(0); } void @@ -577,7 +586,6 @@ int Ndb_cluster_connection::connect(int no_retries, int retry_delay_in_seconds, struct ndb_mgm_reply mgm_reply; DBUG_ENTER("Ndb_cluster_connection::connect"); - const char* error = 0; do { if (m_impl.m_config_retriever == 0) DBUG_RETURN(-1); @@ -595,7 +603,11 @@ int Ndb_cluster_connection::connect(int no_retries, int retry_delay_in_seconds, break; m_impl.m_transporter_facade->start_instance(nodeId, props); - m_impl.init_nodes_vector(nodeId, *props); + if (m_impl.init_nodes_vector(nodeId, *props)) + { + ndbout_c("Ndb_cluster_connection::connect: malloc failure"); + DBUG_RETURN(-1); + } for(unsigned i=0; i<m_impl.m_transporter_facade->get_registry()->m_transporter_interface.size(); @@ -670,5 +682,12 @@ Ndb_cluster_connection::get_active_ndb_objects() const { return m_impl.m_transporter_facade->get_active_ndb_objects(); } + +int Ndb_cluster_connection::set_timeout(int timeout_ms) +{ + return ndb_mgm_set_timeout(m_impl.m_config_retriever->get_mgmHandle(), + timeout_ms); +} + template class Vector<Ndb_cluster_connection_impl::Node>; diff --git a/storage/ndb/src/ndbapi/ndb_cluster_connection_impl.hpp b/storage/ndb/src/ndbapi/ndb_cluster_connection_impl.hpp index 33346a2a1d7..ba488c56ec7 100644 --- a/storage/ndb/src/ndbapi/ndb_cluster_connection_impl.hpp +++ b/storage/ndb/src/ndbapi/ndb_cluster_connection_impl.hpp @@ -22,7 +22,6 @@ #include <NdbMutex.h> extern NdbMutex *g_ndb_connection_mutex; -static int g_ndb_connection_count = 0; class TransporterFacade; class ConfigRetriever; @@ -67,7 +66,7 @@ private: }; Vector<Node> m_all_nodes; - void init_nodes_vector(Uint32 nodeid, const ndb_mgm_configuration &config); + int init_nodes_vector(Uint32 nodeid, const ndb_mgm_configuration &config); void connect_thread(); void set_name(const char *name); diff --git a/storage/ndb/src/ndbapi/ndb_internal.hpp b/storage/ndb/src/ndbapi/ndb_internal.hpp new file mode 100644 index 00000000000..f5f37f95a04 --- /dev/null +++ b/storage/ndb/src/ndbapi/ndb_internal.hpp @@ -0,0 +1,26 @@ +/* Copyright (C) 2007 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#include "NdbImpl.hpp" + +class Ndb_internal +{ +private: + friend class NdbEventBuffer; + Ndb_internal() {} + virtual ~Ndb_internal() {} + static int send_event_report(Ndb *ndb, Uint32 *data, Uint32 length) + { return ndb->theImpl->send_event_report(data, length); } +}; diff --git a/storage/ndb/src/ndbapi/ndberror.c b/storage/ndb/src/ndbapi/ndberror.c index 0587fac7e8a..0ad2faff76a 100644 --- a/storage/ndb/src/ndbapi/ndberror.c +++ b/storage/ndb/src/ndbapi/ndberror.c @@ -19,6 +19,9 @@ #include <ndberror.h> #include <m_string.h> +#include "../mgmsrv/ndb_mgmd_error.h" + + typedef struct ErrorBundle { int code; int mysql_code; @@ -151,7 +154,7 @@ ErrorBundle ErrorCodes[] = { */ { 4007, DMEC, UR, "Send to ndbd node failed" }, { 4008, DMEC, UR, "Receive from NDB failed" }, - { 4009, DMEC, UR, "Cluster Failure" }, + { 4009, HA_ERR_NO_CONNECTION, UR, "Cluster Failure" }, { 4012, DMEC, UR, "Request ndbd time-out, maybe due to high load or communication problems"}, { 4013, DMEC, UR, "Request timed out in waiting for node failure"}, @@ -186,6 +189,7 @@ ErrorBundle ErrorCodes[] = { { 4032, DMEC, TR, "Out of Send Buffer space in NDB API" }, { 1501, DMEC, TR, "Out of undo space" }, { 288, DMEC, TR, "Out of index operations in transaction coordinator (increase MaxNoOfConcurrentIndexOperations)" }, + { 289, DMEC, TR, "Out of transaction buffer memory in TC (increase TransactionBufferMemory)" }, /** * InsufficientSpace @@ -201,7 +205,8 @@ ErrorBundle ErrorCodes[] = { { 904, HA_ERR_INDEX_FILE_FULL, IS, "Out of fragment records (increase MaxNoOfOrderedIndexes)" }, { 905, DMEC, IS, "Out of attribute records (increase MaxNoOfAttributes)" }, { 1601, HA_ERR_RECORD_FILE_FULL, IS, "Out extents, tablespace full" }, - + { 1602, DMEC, IS,"No datafile in tablespace" }, + /** * TimeoutExpired */ @@ -285,6 +290,7 @@ ErrorBundle ErrorCodes[] = { /** * Application error */ + { 281, HA_ERR_NO_CONNECTION, AE, "Operation not allowed due to cluster shutdown in progress" }, { 299, DMEC, AE, "Operation not allowed or aborted due to single user mode" }, { 763, DMEC, AE, "Alter table requires cluster nodes to have exact same version" }, { 823, DMEC, AE, "Too much attrinfo from application in tuple manager" }, @@ -370,7 +376,7 @@ ErrorBundle ErrorCodes[] = { { 771, HA_WRONG_CREATE_OPTION, AE, "Given NODEGROUP doesn't exist in this cluster" }, { 772, HA_WRONG_CREATE_OPTION, IE, "Given fragmentType doesn't exist" }, { 749, HA_WRONG_CREATE_OPTION, IE, "Primary Table in wrong state" }, - { 763, HA_WRONG_CREATE_OPTION, SE, "Invalid undo buffer size" }, + { 779, HA_WRONG_CREATE_OPTION, SE, "Invalid undo buffer size" }, { 764, HA_WRONG_CREATE_OPTION, SE, "Invalid extent size" }, { 765, DMEC, SE, "Out of filegroup records" }, { 750, IE, SE, "Invalid file type" }, @@ -618,6 +624,34 @@ ErrorBundle ErrorCodes[] = { { 4273, DMEC, IE, "No blob table in dict cache" }, { 4274, DMEC, IE, "Corrupted main table PK in blob operation" }, { 4275, DMEC, AE, "The blob method is incompatible with operation type or lock mode" }, + { 4294, DMEC, AE, "Scan filter is too large, discarded" }, + + { NO_CONTACT_WITH_PROCESS, DMEC, AE, + "No contact with the process (dead ?)."}, + { WRONG_PROCESS_TYPE, DMEC, AE, + "The process has wrong type. 
Expected a DB process."}, + { SEND_OR_RECEIVE_FAILED, DMEC, AE, + "Send to process or receive failed."}, + { INVALID_ERROR_NUMBER, DMEC, AE, + "Invalid error number. Should be >= 0."}, + { INVALID_TRACE_NUMBER, DMEC, AE, + "Invalid trace number."}, + { INVALID_BLOCK_NAME, DMEC, AE, + "Invalid block name"}, + { NODE_SHUTDOWN_IN_PROGESS, DMEC, AE, + "Node shutdown in progress" }, + { SYSTEM_SHUTDOWN_IN_PROGRESS, DMEC, AE, + "System shutdown in progress" }, + { NODE_SHUTDOWN_WOULD_CAUSE_SYSTEM_CRASH, DMEC, AE, + "Node shutdown would cause system crash" }, + { UNSUPPORTED_NODE_SHUTDOWN, DMEC, AE, + "Unsupported multi node shutdown. Abort option required." }, + { NODE_NOT_API_NODE, DMEC, AE, + "The specified node is not an API node." }, + { OPERATION_NOT_ALLOWED_START_STOP, DMEC, AE, + "Operation not allowed while nodes are starting or stopping."}, + { NO_CONTACT_WITH_DB_NODES, DMEC, AE, + "No contact with database nodes" } }; static diff --git a/storage/ndb/src/ndbapi/ndberror_check.c b/storage/ndb/src/ndbapi/ndberror_check.c new file mode 100644 index 00000000000..6986d99f3d4 --- /dev/null +++ b/storage/ndb/src/ndbapi/ndberror_check.c @@ -0,0 +1,38 @@ +/* Copyright (C) 2007 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#include <stdio.h> +#include "ndberror.c" + +int main() +{ + int i, j, error = 0; + + /* check for duplicate error codes */ + for(i = 0; i < NbErrorCodes; i++) + { + for(j = i + 1; j < NbErrorCodes; j++) + { + if (ErrorCodes[i].code == ErrorCodes[j].code) + { + fprintf(stderr, "Duplicate error code %u\n", ErrorCodes[i].code); + error = 1; + } + } + } + if (error) + return -1; + return 0; +} diff --git a/storage/ndb/test/include/CpcClient.hpp b/storage/ndb/test/include/CpcClient.hpp index 62f016f8e1d..a743499566f 100644 --- a/storage/ndb/test/include/CpcClient.hpp +++ b/storage/ndb/test/include/CpcClient.hpp @@ -70,8 +70,6 @@ private: char *host; int port; NDB_SOCKET_TYPE cpc_sock; - InputStream *cpc_in; - OutputStream *cpc_out; public: int connect(); diff --git a/storage/ndb/test/include/HugoOperations.hpp b/storage/ndb/test/include/HugoOperations.hpp index 1f3290e8ce1..91e593a2b26 100644 --- a/storage/ndb/test/include/HugoOperations.hpp +++ b/storage/ndb/test/include/HugoOperations.hpp @@ -109,8 +109,8 @@ public: NDBT_ResultRow& get_row(Uint32 idx) { return *rows[idx];} - int execute_async(Ndb*, NdbTransaction::ExecType, NdbTransaction::AbortOption = NdbTransaction::AbortOnError); - int execute_async_prepare(Ndb*, NdbTransaction::ExecType, NdbTransaction::AbortOption = NdbTransaction::AbortOnError); + int execute_async(Ndb*, NdbTransaction::ExecType, NdbOperation::AbortOption = NdbOperation::AbortOnError); + int execute_async_prepare(Ndb*, NdbTransaction::ExecType, NdbOperation::AbortOption = NdbOperation::AbortOnError); int wait_async(Ndb*, int timeout = -1); diff --git a/storage/ndb/test/include/NDBT_Error.hpp b/storage/ndb/test/include/NDBT_Error.hpp index 
352f5926eeb..faec0cdadfc 100644 --- a/storage/ndb/test/include/NDBT_Error.hpp +++ b/storage/ndb/test/include/NDBT_Error.hpp @@ -77,8 +77,8 @@ private: }; // -// ERR prints an NdbError object togheter with a description of where -// the error occured +// ERR prints an NdbError object together with a description of where the +// error occurred // #define ERR_OUT(where, error) \ { where << "ERROR: " << error.code << " " \ diff --git a/storage/ndb/test/include/NDBT_ReturnCodes.h b/storage/ndb/test/include/NDBT_ReturnCodes.h index 8660c0828f4..b48fccdb12d 100644 --- a/storage/ndb/test/include/NDBT_ReturnCodes.h +++ b/storage/ndb/test/include/NDBT_ReturnCodes.h @@ -26,7 +26,7 @@ extern "C" { #define NDBT_TEMPORARY 3 /** * NDBT_ProgramExit - * This function will print the returncode togheter with a prefix on + * This function will print the returncode together with a prefix on * the screen and then exit the test program. * Call this function when exiting the main function in your test programs * Returns the return code diff --git a/storage/ndb/test/include/NDBT_Test.hpp b/storage/ndb/test/include/NDBT_Test.hpp index 46d27ec1c3c..ad09f04e814 100644 --- a/storage/ndb/test/include/NDBT_Test.hpp +++ b/storage/ndb/test/include/NDBT_Test.hpp @@ -333,6 +333,12 @@ public: // supply argc and argv as parameters int execute(int, const char**); + // NDBT's test tables are fixed, and execute() always creates + // and drops those fixed tables. This method runs only the test + // context (CTX), so that new customized test suites can adapt + // it to their own tables. + int executeOneCtx(Ndb_cluster_connection&, + const NdbDictionary::Table* ptab, const char* testname = NULL); // These function can be used from main in the test program // to control the behaviour of the testsuite diff --git a/storage/ndb/test/include/NdbMixRestarter.hpp b/storage/ndb/test/include/NdbMixRestarter.hpp new file mode 100644 index 00000000000..f4f91ad2b48 --- /dev/null +++ b/storage/ndb/test/include/NdbMixRestarter.hpp @@ -0,0 +1,74 @@ +/* Copyright (C) 2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#ifndef NDBT_MIX_RESTARTER_HPP +#define NDBT_MIX_RESTARTER_HPP + +#include <mgmapi.h> +#include <Vector.hpp> +#include <BaseString.hpp> +#include "NdbRestarter.hpp" +#include "NDBT_Test.hpp" + +#define NMR_SR "SR" +#define NMR_SR_THREADS "SR_ThreadCount" +#define NMR_SR_THREADS_STOPPED "SR_ThreadsStoppedCount" +#define NMR_SR_VALIDATE_THREADS "SR_ValidateThreadCount" +#define NMR_SR_VALIDATE_THREADS_DONE "SR_ValidateThreadsDoneCount" + +class NdbMixRestarter : public NdbRestarter +{ +public: + enum RestartTypeMask + { + RTM_RestartCluster = 0x01, + RTM_RestartNode = 0x02, + RTM_RestartNodeInitial = 0x04, + RTM_StopNode = 0x08, + RTM_StopNodeInitial = 0x10, + RTM_StartNode = 0x20, + + RTM_COUNT = 6, + + RTM_ALL = 0xFF, + RTM_SR = RTM_RestartCluster, + RTM_NR = 0x2 | 0x4 | 0x8 | 0x10 | 0x20 + }; + + enum SR_State { + SR_RUNNING = 0, + SR_STOPPING = 1, + SR_STOPPED = 2, + SR_VALIDATING = 3 + }; + + NdbMixRestarter(const char* _addr = 0); + ~NdbMixRestarter(); + + void setRestartTypeMask(Uint32 mask); + int runUntilStopped(NDBT_Context* ctx, NDBT_Step* step, Uint32 freq); + int runPeriod(NDBT_Context* ctx, NDBT_Step* step, Uint32 time, Uint32 freq); + + int init(NDBT_Context* ctx, NDBT_Step* step); + int dostep(NDBT_Context* ctx, NDBT_Step* step); + int finish(NDBT_Context* ctx, NDBT_Step* step); + +private: + Uint32 m_mask; + Vector<ndb_mgm_node_state> m_nodes; + int restart_cluster(NDBT_Context* ctx, NDBT_Step* step, bool abort = true); +}; + +#endif diff --git a/storage/ndb/test/ndbapi/Makefile.am b/storage/ndb/test/ndbapi/Makefile.am index 3209a8be523..ad509dbbafe 100644 --- a/storage/ndb/test/ndbapi/Makefile.am +++ b/storage/ndb/test/ndbapi/Makefile.am @@ -39,6 +39,7 @@ testOperations \ testRestartGci \ testScan \ testInterpreter \ +testScanFilter \ testScanInterpreter \ testScanPerf \ testSystemRestart \ @@ -85,6 +86,7 @@ testOperations_SOURCES = testOperations.cpp testRestartGci_SOURCES = testRestartGci.cpp testScan_SOURCES = testScan.cpp ScanFunctions.hpp testInterpreter_SOURCES = testInterpreter.cpp +testScanFilter_SOURCES = testScanFilter.cpp testScanInterpreter_SOURCES = testScanInterpreter.cpp ScanFilter.hpp ScanInterpretTest.hpp testScanPerf_SOURCES = testScanPerf.cpp testSystemRestart_SOURCES = testSystemRestart.cpp diff --git a/storage/ndb/test/ndbapi/benchronja.cpp b/storage/ndb/test/ndbapi/benchronja.cpp index 4973e6e2487..73ee324a888 100644 --- a/storage/ndb/test/ndbapi/benchronja.cpp +++ b/storage/ndb/test/ndbapi/benchronja.cpp @@ -41,7 +41,14 @@ #define MAXSTRLEN 16 #define MAXATTR 64 #define MAXTABLES 64 -#define MAXTHREADS 256 +#define NDB_MAXTHREADS 256 +/* + NDB_MAXTHREADS used to be just MAXTHREADS, which collides with a + #define from <sys/thread.h> on AIX (IBM compiler). We explicitly + #undef it here lest someone use it by habit and get really funny + results. K&R says we may #undef non-existent symbols, so let's go. 
+*/ +#undef MAXTHREADS #define MAXATTRSIZE 8000 #define START_TIMER NdbTimer timer; timer.doStart(); #define STOP_TIMER timer.doStop(); @@ -56,18 +63,18 @@ struct ThreadNdb Ndb* NdbRef; }; -static NdbThread* threadLife[MAXTHREADS]; +static NdbThread* threadLife[NDB_MAXTHREADS]; static unsigned int tNoOfThreads; static unsigned int tNoOfOpsPerExecute; static unsigned int tNoOfRecords; static unsigned int tNoOfOperations; -static int ThreadReady[MAXTHREADS]; -static int ThreadStart[MAXTHREADS]; +static int ThreadReady[NDB_MAXTHREADS]; +static int ThreadStart[NDB_MAXTHREADS]; NDB_COMMAND(benchronja, "benchronja", "benchronja", "benchronja", 65535){ ndb_init(); - ThreadNdb tabThread[MAXTHREADS]; + ThreadNdb tabThread[NDB_MAXTHREADS]; int i = 0 ; int cont = 0 ; Ndb* pMyNdb = NULL ; //( "TEST_DB" ); @@ -84,7 +91,7 @@ NDB_COMMAND(benchronja, "benchronja", "benchronja", "benchronja", 65535){ { if (strcmp(argv[i], "-t") == 0){ tNoOfThreads = atoi(argv[i+1]); - if ((tNoOfThreads < 1) || (tNoOfThreads > MAXTHREADS)) goto error_input; + if ((tNoOfThreads < 1) || (tNoOfThreads > NDB_MAXTHREADS)) goto error_input; }else if (strcmp(argv[i], "-o") == 0){ tNoOfOperations = atoi(argv[i+1]); if (tNoOfOperations < 1) goto error_input; diff --git a/storage/ndb/test/ndbapi/flexAsynch.cpp b/storage/ndb/test/ndbapi/flexAsynch.cpp index 20a157fc2f3..1f52315482f 100644 --- a/storage/ndb/test/ndbapi/flexAsynch.cpp +++ b/storage/ndb/test/ndbapi/flexAsynch.cpp @@ -35,7 +35,14 @@ #define MAXSTRLEN 16 #define MAXATTR 64 #define MAXTABLES 64 -#define MAXTHREADS 128 +#define NDB_MAXTHREADS 128 +/* + NDB_MAXTHREADS used to be just MAXTHREADS, which collides with a + #define from <sys/thread.h> on AIX (IBM compiler). We explicitly + #undef it here lest someone use it by habit and get really funny + results. K&R says we may #undef non-existent symbols, so let's go. 
+*/ +#undef MAXTHREADS #define MAXPAR 1024 #define MAXATTRSIZE 1000 #define PKSIZE 2 @@ -76,10 +83,10 @@ struct ThreadNdb int ThreadNo; }; -static NdbThread* threadLife[MAXTHREADS]; +static NdbThread* threadLife[NDB_MAXTHREADS]; static int tNodeId; -static int ThreadReady[MAXTHREADS]; -static StartType ThreadStart[MAXTHREADS]; +static int ThreadReady[NDB_MAXTHREADS]; +static StartType ThreadStart[NDB_MAXTHREADS]; static char tableName[MAXTABLES][MAXSTRLEN+1]; static char attrName[MAXATTR][MAXSTRLEN+1]; @@ -160,7 +167,7 @@ NDB_COMMAND(flexAsynch, "flexAsynch", "flexAsynch", "flexAsynch", 65535) return NDBT_ProgramExit(NDBT_WRONGARGS); } - pThreadData = new ThreadNdb[MAXTHREADS]; + pThreadData = new ThreadNdb[NDB_MAXTHREADS]; ndbout << endl << "FLEXASYNCH - Starting normal mode" << endl; ndbout << "Perform benchmark of insert, update and delete transactions"; @@ -844,7 +851,7 @@ readArguments(int argc, const char** argv){ while (argc > 1){ if (strcmp(argv[i], "-t") == 0){ tNoOfThreads = atoi(argv[i+1]); - if ((tNoOfThreads < 1) || (tNoOfThreads > MAXTHREADS)){ + if ((tNoOfThreads < 1) || (tNoOfThreads > NDB_MAXTHREADS)){ ndbout_c("Invalid no of threads"); return -1; } diff --git a/storage/ndb/test/ndbapi/flexHammer.cpp b/storage/ndb/test/ndbapi/flexHammer.cpp index 9b9fd7a4a92..1b0097cf84b 100644 --- a/storage/ndb/test/ndbapi/flexHammer.cpp +++ b/storage/ndb/test/ndbapi/flexHammer.cpp @@ -66,7 +66,14 @@ ErrorData * flexHammerErrorData; #define MAXSTRLEN 16 #define MAXATTR 64 #define MAXTABLES 64 -#define MAXTHREADS 256 +#define NDB_MAXTHREADS 256 +/* + NDB_MAXTHREADS used to be just MAXTHREADS, which collides with a + #define from <sys/thread.h> on AIX (IBM compiler). We explicitly + #undef it here lest someone use it by habit and get really funny + results. K&R says we may #undef non-existent symbols, so let's go. +*/ +#undef MAXTHREADS #define MAXATTRSIZE 100 // Max number of retries if something fails #define MaxNoOfAttemptsC 10 @@ -119,8 +126,8 @@ static int tAttributeSize; static int tNoOfOperations; static int tNoOfRecords; static int tNoOfLoops; -static ReadyType ThreadReady[MAXTHREADS]; -static StartType ThreadStart[MAXTHREADS]; +static ReadyType ThreadReady[NDB_MAXTHREADS]; +static StartType ThreadStart[NDB_MAXTHREADS]; static char tableName[MAXTABLES][MAXSTRLEN]; static char attrName[MAXATTR][MAXSTRLEN]; static int theSimpleFlag = 0; @@ -640,7 +647,7 @@ readArguments (int argc, const char** argv) while (argc > 1) { if (strcmp(argv[i], "-t") == 0) { tNoOfThreads = atoi(argv[i+1]); - if ((tNoOfThreads < 1) || (tNoOfThreads > MAXTHREADS)) + if ((tNoOfThreads < 1) || (tNoOfThreads > NDB_MAXTHREADS)) return(1); } else if (strcmp(argv[i], "-o") == 0) { diff --git a/storage/ndb/test/ndbapi/flexScan.cpp b/storage/ndb/test/ndbapi/flexScan.cpp index e0b07250762..105dfeedfff 100644 --- a/storage/ndb/test/ndbapi/flexScan.cpp +++ b/storage/ndb/test/ndbapi/flexScan.cpp @@ -68,7 +68,14 @@ #define MAXSTRLEN 16 #define MAXATTR 64 #define MAXTABLES 64 -#define MAXTHREADS 256 +#define NDB_MAXTHREADS 256 +/* + NDB_MAXTHREADS used to be just MAXTHREADS, which collides with a + #define from <sys/thread.h> on AIX (IBM compiler). We explicitly + #undef it here lest someone use it by habit and get really funny + results. K&R says we may #undef non-existent symbols, so let's go. 
+*/ +#undef MAXTHREADS #define MAXATTRSIZE 64 enum StartType { @@ -848,7 +855,7 @@ static int readArguments(int argc, const char** argv) if (strcmp(argv[i], "-t") == 0) { if (argv[i + 1] != NULL) { tNoOfThreads = atoi(argv[i + 1]); - if ((tNoOfThreads < 1) || (tNoOfThreads > MAXTHREADS)) { + if ((tNoOfThreads < 1) || (tNoOfThreads > NDB_MAXTHREADS)) { retValue = -1; } // if } // if diff --git a/storage/ndb/test/ndbapi/flexTT.cpp b/storage/ndb/test/ndbapi/flexTT.cpp index 71d5b6c096e..4373102f77e 100644 --- a/storage/ndb/test/ndbapi/flexTT.cpp +++ b/storage/ndb/test/ndbapi/flexTT.cpp @@ -35,7 +35,14 @@ #define MAXSTRLEN 16 #define MAXATTR 64 #define MAXTABLES 64 -#define MAXTHREADS 128 +#define NDB_MAXTHREADS 128 +/* + NDB_MAXTHREADS used to be just MAXTHREADS, which collides with a + #define from <sys/thread.h> on AIX (IBM compiler). We explicitly + #undef it here lest someone use it by habit and get really funny + results. K&R says we may #undef non-existent symbols, so let's go. +*/ +#undef MAXTHREADS #define MAXPAR 1024 #define MAXATTRSIZE 1000 #define PKSIZE 1 @@ -101,10 +108,10 @@ static void input_error(); ErrorData * flexTTErrorData; -static NdbThread* threadLife[MAXTHREADS]; +static NdbThread* threadLife[NDB_MAXTHREADS]; static int tNodeId; -static int ThreadReady[MAXTHREADS]; -static StartType ThreadStart[MAXTHREADS]; +static int ThreadReady[NDB_MAXTHREADS]; +static StartType ThreadStart[NDB_MAXTHREADS]; static char tableName[1][MAXSTRLEN+1]; static char attrName[5][MAXSTRLEN+1]; @@ -184,7 +191,7 @@ NDB_COMMAND(flexTT, "flexTT", "flexTT", "flexTT", 65535) return NDBT_ProgramExit(NDBT_WRONGARGS); } - pThreadData = new ThreadNdb[MAXTHREADS]; + pThreadData = new ThreadNdb[NDB_MAXTHREADS]; ndbout << endl << "FLEXTT - Starting normal mode" << endl; ndbout << "Perform TimesTen benchmark" << endl; @@ -798,7 +805,7 @@ readArguments(int argc, const char** argv){ while (argc > 1){ if (strcmp(argv[i], "-t") == 0){ tNoOfThreads = atoi(argv[i+1]); - if ((tNoOfThreads < 1) || (tNoOfThreads > MAXTHREADS)){ + if ((tNoOfThreads < 1) || (tNoOfThreads > NDB_MAXTHREADS)){ ndbout_c("Invalid no of threads"); return -1; } diff --git a/storage/ndb/test/ndbapi/flexTimedAsynch.cpp b/storage/ndb/test/ndbapi/flexTimedAsynch.cpp index cc44ab8b237..b6301e59df2 100644 --- a/storage/ndb/test/ndbapi/flexTimedAsynch.cpp +++ b/storage/ndb/test/ndbapi/flexTimedAsynch.cpp @@ -57,7 +57,14 @@ #define MAXSTRLEN 16 #define MAXATTR 64 #define MAXTABLES 64 -#define MAXTHREADS 256 +#define NDB_MAXTHREADS 256 +/* + NDB_MAXTHREADS used to be just MAXTHREADS, which collides with a + #define from <sys/thread.h> on AIX (IBM compiler). We explicitly + #undef it here lest someone use it by habit and get really funny + results. K&R says we may #undef non-existent symbols, so let's go. 
+*/ +#undef MAXTHREADS #define MAXATTRSIZE 1000 #define PKSIZE 1 @@ -95,10 +102,10 @@ static int failed = 0 ; // lame global variable that keeps track of failed trans // incremented in executeCallback() and reset in main() /************************************************************* < epaulsa */ -static NdbThread* threadLife[MAXTHREADS]; +static NdbThread* threadLife[NDB_MAXTHREADS]; static int tNodeId; -static int ThreadReady[MAXTHREADS]; -static StartType ThreadStart[MAXTHREADS]; +static int ThreadReady[NDB_MAXTHREADS]; +static StartType ThreadStart[NDB_MAXTHREADS]; static char tableName[MAXTABLES][MAXSTRLEN+1]; static char attrName[MAXATTR][MAXSTRLEN+1]; static int *getAttrValueTable; @@ -174,7 +181,7 @@ void deleteAttributeSpace(){ NDB_COMMAND(flexTimedAsynch, "flexTimedAsynch", "flexTimedAsynch [-tpoilcas]", "flexTimedAsynch", 65535) { ndb_init(); - ThreadNdb tabThread[MAXTHREADS]; + ThreadNdb tabThread[NDB_MAXTHREADS]; int tLoops=0; int returnValue; //NdbOut flexTimedAsynchNdbOut; @@ -615,8 +622,8 @@ void readArguments(int argc, const char** argv) if (strcmp(argv[i], "-t") == 0) { tNoOfThreads = atoi(argv[i+1]); - // if ((tNoOfThreads < 1) || (tNoOfThreads > MAXTHREADS)) - if ((tNoOfThreads < 1) || (tNoOfThreads > MAXTHREADS)) + // if ((tNoOfThreads < 1) || (tNoOfThreads > NDB_MAXTHREADS)) + if ((tNoOfThreads < 1) || (tNoOfThreads > NDB_MAXTHREADS)) exit(-1); } else if (strcmp(argv[i], "-i") == 0) @@ -628,7 +635,7 @@ void readArguments(int argc, const char** argv) else if (strcmp(argv[i], "-p") == 0) { tNoOfTransInBatch = atoi(argv[i+1]); - //if ((tNoOfTransInBatch < 1) || (tNoOfTransInBatch > MAXTHREADS)) + //if ((tNoOfTransInBatch < 1) || (tNoOfTransInBatch > NDB_MAXTHREADS)) if ((tNoOfTransInBatch < 1) || (tNoOfTransInBatch > 10000)) exit(-1); } diff --git a/storage/ndb/test/ndbapi/initronja.cpp b/storage/ndb/test/ndbapi/initronja.cpp index 170c3dd5cfb..28ffa9f211d 100644 --- a/storage/ndb/test/ndbapi/initronja.cpp +++ b/storage/ndb/test/ndbapi/initronja.cpp @@ -29,7 +29,14 @@ #define MAXSTRLEN 16 #define MAXATTR 64 #define MAXTABLES 64 -#define MAXTHREADS 256 +#define NDB_MAXTHREADS 256 +/* + NDB_MAXTHREADS used to be just MAXTHREADS, which collides with a + #define from <sys/thread.h> on AIX (IBM compiler). We explicitly + #undef it here lest someone use it by habit and get really funny + results. K&R says we may #undef non-existent symbols, so let's go. 
+*/ +#undef MAXTHREADS #define MAXATTRSIZE 8000 static unsigned int tNoOfRecords; diff --git a/storage/ndb/test/ndbapi/testBasic.cpp b/storage/ndb/test/ndbapi/testBasic.cpp index 5086cfc8b5b..ac23ceaad18 100644 --- a/storage/ndb/test/ndbapi/testBasic.cpp +++ b/storage/ndb/test/ndbapi/testBasic.cpp @@ -136,31 +136,13 @@ int runPkRead(NDBT_Context* ctx, NDBT_Step* step){ int loops = ctx->getNumLoops(); int records = ctx->getNumRecords(); int batchSize = ctx->getProperty("BatchSize", 1); + int lm = ctx->getProperty("LockMode", NdbOperation::LM_Read); int i = 0; HugoTransactions hugoTrans(*ctx->getTab()); while (i<loops) { g_info << i << ": "; - if (hugoTrans.pkReadRecords(GETNDB(step), records, batchSize) != NDBT_OK){ - g_info << endl; - return NDBT_FAILED; - } - i++; - } - g_info << endl; - return NDBT_OK; -} - -int runPkDirtyRead(NDBT_Context* ctx, NDBT_Step* step){ - int loops = ctx->getNumLoops(); - int records = ctx->getNumRecords(); - int batchSize = ctx->getProperty("BatchSize", 1); - int i = 0; - bool dirty = true; - HugoTransactions hugoTrans(*ctx->getTab()); - while (i<loops) { - g_info << i << ": "; - if (hugoTrans.pkReadRecords(GETNDB(step), records, batchSize, - NdbOperation::LM_CommittedRead) != NDBT_OK){ + if (hugoTrans.pkReadRecords(GETNDB(step), records, batchSize, + (NdbOperation::LockMode)lm) != NDBT_OK){ g_info << endl; return NDBT_FAILED; } @@ -1273,71 +1255,63 @@ runBug25090(NDBT_Context* ctx, NDBT_Step* step){ } int -runBug28073(NDBT_Context *ctx, NDBT_Step* step) -{ - int result = NDBT_OK; - const NdbDictionary::Table *table= ctx->getTab(); - HugoOperations hugoOp1(*table); - HugoOperations hugoOp2(*table); +runDeleteRead(NDBT_Context* ctx, NDBT_Step* step){ + Ndb* pNdb = GETNDB(step); - int loops = ctx->getNumLoops(); - bool inserted= false; + const NdbDictionary::Table* tab = ctx->getTab(); + NDBT_ResultRow row(*ctx->getTab()); + HugoTransactions tmp(*ctx->getTab()); + + int a; + int loops = ctx->getNumLoops(); + const int rows = ctx->getNumRecords(); + while (loops--) { - if (!inserted) + NdbTransaction* pTrans = pNdb->startTransaction(); + NdbOperation* pOp = pTrans->getNdbOperation(tab->getName()); + pOp->deleteTuple(); + tmp.equalForRow(pOp, loops); + + // Define attributes to read + for(a = 0; a<tab->getNoOfColumns(); a++) { - CHECK(hugoOp1.startTransaction(pNdb) == 0); - CHECK(hugoOp1.pkInsertRecord(pNdb, 1, 1) == 0); - CHECK(hugoOp1.execute_Commit(pNdb) == 0); - CHECK(hugoOp1.closeTransaction(pNdb) == 0); - inserted= 1; + if((row.attributeStore(a) = pOp->getValue(tab->getColumn(a)->getName())) == 0) { + ERR(pTrans->getNdbError()); + return NDBT_FAILED; + } } - // Use TC hint to hit the same node in both transactions. - Uint32 key_val= 0; - const char *key= (const char *)(&key_val); - CHECK(hugoOp1.startTransaction(pNdb, table, key, 4) == 0); - CHECK(hugoOp2.startTransaction(pNdb, table, key, 4) == 0); + pTrans->execute(Commit); + pTrans->close(); - // First take 2*read lock on the tuple in transaction 1. 
- for (Uint32 i= 0; i < 2; i++) + pTrans = pNdb->startTransaction(); + pOp = pTrans->getNdbOperation(tab->getName()); + pOp->insertTuple(); + tmp.setValues(pOp, loops, 0); + + pOp = pTrans->getNdbOperation(tab->getName()); + pOp->deleteTuple(); + tmp.equalForRow(pOp, loops); + for(a = 0; a<tab->getNoOfColumns(); a++) { - CHECK(hugoOp1.pkReadRecord(pNdb, 1, 1, NdbOperation::LM_Read) == 0); - CHECK(hugoOp1.pkReadRecord(pNdb, 1, 1, NdbOperation::LM_Read) == 0); + if((row.attributeStore(a) = pOp->getValue(tab->getColumn(a)->getName())) == 0) + { + ERR(pTrans->getNdbError()); + return NDBT_FAILED; + } } - CHECK(hugoOp1.execute_NoCommit(pNdb) == 0); - - // Now send ops in two transactions, one batch. - // First 2*read in transaction 2. - for (Uint32 i= 0; i < 2; i++) + if (pTrans->execute(Commit) != 0) { - CHECK(hugoOp2.pkReadRecord(pNdb, 1, 1, NdbOperation::LM_Read) == 0); - CHECK(hugoOp2.pkReadRecord(pNdb, 1, 1, NdbOperation::LM_Read) == 0); + ERR(pTrans->getNdbError()); + return NDBT_FAILED; } - CHECK(hugoOp2.execute_async_prepare(pNdb, NdbTransaction::NoCommit) == 0); - - // Second op an update in transaction 1. - CHECK(hugoOp1.pkUpdateRecord(pNdb, 1, 1) == 0); - CHECK(hugoOp1.execute_async_prepare(pNdb, NdbTransaction::Commit) == 0); - // Transaction 1 will now hang waiting on transaction 2 to commit before it - // can upgrade its read lock to a write lock. - // With the bug, we get a node failure due to watchdog timeout here. - CHECK(hugoOp2.wait_async(pNdb) == 0); - - // Now commit transaction 2, we should see transaction 1 finish with the - // update. - CHECK(hugoOp2.execute_async_prepare(pNdb, NdbTransaction::Commit) == 0); - CHECK(hugoOp2.wait_async(pNdb) == 0); - // No error check, as transaction 1 may have terminated already. - hugoOp1.wait_async(pNdb); - - CHECK(hugoOp1.closeTransaction(pNdb) == 0); - CHECK(hugoOp2.closeTransaction(pNdb) == 0); + pTrans->close(); } - - return result; + + return NDBT_OK; } int @@ -1396,6 +1370,74 @@ runBug27756(NDBT_Context* ctx, NDBT_Step* step) return NDBT_OK; } +int +runBug28073(NDBT_Context *ctx, NDBT_Step* step) +{ + int result = NDBT_OK; + const NdbDictionary::Table *table= ctx->getTab(); + HugoOperations hugoOp1(*table); + HugoOperations hugoOp2(*table); + Ndb* pNdb = GETNDB(step); + int loops = ctx->getNumLoops(); + bool inserted= false; + + while (loops--) + { + if (!inserted) + { + CHECK(hugoOp1.startTransaction(pNdb) == 0); + CHECK(hugoOp1.pkInsertRecord(pNdb, 1, 1) == 0); + CHECK(hugoOp1.execute_Commit(pNdb) == 0); + CHECK(hugoOp1.closeTransaction(pNdb) == 0); + inserted= 1; + } + + // Use TC hint to hit the same node in both transactions. + Uint32 key_val= 0; + const char *key= (const char *)(&key_val); + CHECK(hugoOp1.startTransaction(pNdb, table, key, 4) == 0); + CHECK(hugoOp2.startTransaction(pNdb, table, key, 4) == 0); + + // First take 2*read lock on the tuple in transaction 1. + for (Uint32 i= 0; i < 2; i++) + { + CHECK(hugoOp1.pkReadRecord(pNdb, 1, 1, NdbOperation::LM_Read) == 0); + CHECK(hugoOp1.pkReadRecord(pNdb, 1, 1, NdbOperation::LM_Read) == 0); + } + CHECK(hugoOp1.execute_NoCommit(pNdb) == 0); + + // Now send ops in two transactions, one batch. + // First 2*read in transaction 2. + for (Uint32 i= 0; i < 2; i++) + { + CHECK(hugoOp2.pkReadRecord(pNdb, 1, 1, NdbOperation::LM_Read) == 0); + CHECK(hugoOp2.pkReadRecord(pNdb, 1, 1, NdbOperation::LM_Read) == 0); + } + CHECK(hugoOp2.execute_async_prepare(pNdb, NdbTransaction::NoCommit) == 0); + + // Second op an update in transaction 1. 
+ CHECK(hugoOp1.pkUpdateRecord(pNdb, 1, 1) == 0); + CHECK(hugoOp1.execute_async_prepare(pNdb, NdbTransaction::Commit) == 0); + + // Transaction 1 will now hang waiting on transaction 2 to commit before it + // can upgrade its read lock to a write lock. + // With the bug, we get a node failure due to watchdog timeout here. + CHECK(hugoOp2.wait_async(pNdb) == 0); + + // Now commit transaction 2, we should see transaction 1 finish with the + // update. + CHECK(hugoOp2.execute_async_prepare(pNdb, NdbTransaction::Commit) == 0); + CHECK(hugoOp2.wait_async(pNdb) == 0); + // No error check, as transaction 1 may have terminated already. + hugoOp1.wait_async(pNdb); + + CHECK(hugoOp1.closeTransaction(pNdb) == 0); + CHECK(hugoOp2.closeTransaction(pNdb) == 0); + } + + return result; +} + template class Vector<Uint64>; int @@ -1492,14 +1534,23 @@ TESTCASE("PkInsert", } TESTCASE("PkRead", "Verify that we can insert, read and delete from this table using PK"){ + TC_PROPERTY("LockMode", NdbOperation::LM_Read); INITIALIZER(runLoadTable); STEP(runPkRead); FINALIZER(runClearTable); } TESTCASE("PkDirtyRead", "Verify that we can insert, dirty read and delete from this table using PK"){ + TC_PROPERTY("LockMode", NdbOperation::LM_Dirty); INITIALIZER(runLoadTable); - STEP(runPkDirtyRead); + STEP(runPkRead); + FINALIZER(runClearTable); +} +TESTCASE("PkSimpleRead", + "Verify that we can insert, simple read and delete from this table using PK"){ + TC_PROPERTY("LockMode", NdbOperation::LM_SimpleRead); + INITIALIZER(runLoadTable); + STEP(runPkRead); FINALIZER(runClearTable); } TESTCASE("PkUpdate", @@ -1753,14 +1804,20 @@ TESTCASE("Bug25090", "Verify what happens when we fill the db" ){ STEP(runBug25090); } -TESTCASE("Bug28073", - "Infinite loop in lock queue" ){ - STEP(runBug28073); +TESTCASE("DeleteRead", + "Verify Delete+Read" ){ + INITIALIZER(runLoadTable); + INITIALIZER(runDeleteRead); + FINALIZER(runClearTable2); } TESTCASE("Bug27756", "Verify what happens when we fill the db" ){ STEP(runBug27756); } +TESTCASE("Bug28073", + "Infinite loop in lock queue" ){ + STEP(runBug28073); +} TESTCASE("Bug20535", "Verify what happens when we fill the db" ){ STEP(runBug20535); diff --git a/storage/ndb/test/ndbapi/testBlobs.cpp b/storage/ndb/test/ndbapi/testBlobs.cpp index 81072f6a12a..d9c657a0a29 100644 --- a/storage/ndb/test/ndbapi/testBlobs.cpp +++ b/storage/ndb/test/ndbapi/testBlobs.cpp @@ -123,23 +123,25 @@ printusage() << "metadata" << endl << " -pk2len N length of PK2 [" << d.m_pk2len << "/" << g_max_pk2len <<"]" << endl << " -oneblob only 1 blob attribute [default 2]" << endl - << "testcases for test/skip" << endl + << "test cases for test/skip" << endl << " k primary key ops" << endl << " i hash index ops" << endl << " s table scans" << endl << " r ordered index scans" << endl << " p performance test" << endl - << "additional flags for test/skip" << endl + << "operations for test/skip" << endl << " u update existing blob value" << endl << " n normal insert and update" << endl << " w insert and update using writeTuple" << endl + << "blob operation styles for test/skip" << endl << " 0 getValue / setValue" << endl << " 1 setActiveHook" << endl << " 2 readData / writeData" << endl - << "bug tests (no blob test)" << endl + << "example: -test kn0 (need all 3 parts)" << endl + << "bug tests" << endl << " -bug 4088 ndb api hang with mixed ops on index table" << endl - << " -bug nnnn delete + write gives 626" << endl - << " -bug nnnn acc crash on delete and long key" << endl + << " -bug 27018 middle partial part write clobbers 
rest of part" << endl + << " -bug 27370 Potential inconsistent blob reads for ReadCommitted reads" << endl ; } @@ -734,7 +736,7 @@ verifyHeadInline(const Tup& tup) if (! g_opt.m_oneblob) CHK((ra2 = g_opr->getValue("BL2")) != 0); if (tup.m_exists) { - CHK(g_con->execute(Commit) == 0); + CHK(g_con->execute(Commit, AbortOnError) == 0); DBG("verifyHeadInline BL1"); CHK(verifyHeadInline(g_opt.m_blob1, tup.m_blob1, ra1) == 0); if (! g_opt.m_oneblob) { @@ -742,7 +744,8 @@ verifyHeadInline(const Tup& tup) CHK(verifyHeadInline(g_opt.m_blob2, tup.m_blob2, ra2) == 0); } } else { - CHK(g_con->execute(Commit) == -1 && g_con->getNdbError().code == 626); + CHK(g_con->execute(Commit, AbortOnError) == -1 && + g_con->getNdbError().code == 626); } g_ndb->closeTransaction(g_con); g_opr = 0; @@ -1027,6 +1030,32 @@ deletePk() return 0; } +static int +deleteNoPk() +{ + DBG("--- deleteNoPk ---"); + Tup no_tup; // bug#24028 + no_tup.m_pk1 = 0xb1ffb1ff; + sprintf(no_tup.m_pk2, "%-*.*s", g_opt.m_pk2len, g_opt.m_pk2len, "b1ffb1ff"); + CHK((g_con = g_ndb->startTransaction()) != 0); + Tup& tup = no_tup; + DBG("deleteNoPk pk1=" << hex << tup.m_pk1); + CHK((g_opr = g_con->getNdbOperation(g_opt.m_tname)) != 0); + CHK(g_opr->deleteTuple() == 0); + CHK(g_opr->equal("PK1", tup.m_pk1) == 0); + if (g_opt.m_pk2len != 0) + CHK(g_opr->equal("PK2", tup.m_pk2) == 0); + CHK(g_con->execute(Commit) == -1); // fail + // BUG: error should be on op but is on con now + DBG("con: " << g_con->getNdbError()); + DBG("opr: " << g_opr->getNdbError()); + CHK(g_con->getNdbError().code == 626 || g_opr->getNdbError().code == 626); + g_ndb->closeTransaction(g_con); + g_opr = 0; + g_con = 0; + return 0; +} + // hash index ops static int @@ -1382,6 +1411,7 @@ testmain() CHK(readPk(style) == 0); } CHK(deletePk() == 0); + CHK(deleteNoPk() == 0); CHK(verifyBlob() == 0); } if (testcase('w')) { @@ -1396,6 +1426,7 @@ testmain() CHK(readPk(style) == 0); } CHK(deletePk() == 0); + CHK(deleteNoPk() == 0); CHK(verifyBlob() == 0); } } @@ -1534,7 +1565,7 @@ testperf() g_dic = g_ndb->getDictionary(); NdbDictionary::Table tab(g_opt.m_tnameperf); if (g_dic->getTable(tab.getName()) != 0) - CHK(g_dic->dropTable(tab) == 0); + CHK(g_dic->dropTable(tab.getName()) == 0); // col A - pk { NdbDictionary::Column col("A"); col.setType(NdbDictionary::Column::Unsigned); @@ -1807,14 +1838,249 @@ bugtest_4088() } static int -bugtest_2222() +bugtest_27018() { + DBG("bug test 27018 - middle partial part write clobbers rest of part"); + + // insert rows + calcTups(false); + CHK(insertPk(false) == 0); + // new trans + for (unsigned k= 0; k < g_opt.m_rows; k++) + { + Tup& tup= g_tups[k]; + + CHK((g_con= g_ndb->startTransaction()) != 0); + CHK((g_opr= g_con->getNdbOperation(g_opt.m_tname)) != 0); + CHK(g_opr->updateTuple() == 0); + CHK(g_opr->equal("PK1", tup.m_pk1) == 0); + if (g_opt.m_pk2len != 0) + CHK(g_opr->equal("PK2", tup.m_pk2) == 0); + CHK(getBlobHandles(g_opr) == 0); + CHK(g_con->execute(NoCommit) == 0); + + /* Update one byte in random position. 
*/ + Uint32 offset= urandom(tup.m_blob1.m_len); + tup.m_blob1.m_buf[0]= 0xff ^ tup.m_blob1.m_val[offset]; + CHK(g_bh1->setPos(offset) == 0); + CHK(g_bh1->writeData(&(tup.m_blob1.m_buf[0]), 1) == 0); + CHK(g_con->execute(Commit) == 0); + g_ndb->closeTransaction(g_con); + + CHK((g_con= g_ndb->startTransaction()) != 0); + CHK((g_opr= g_con->getNdbOperation(g_opt.m_tname)) != 0); + CHK(g_opr->readTuple() == 0); + CHK(g_opr->equal("PK1", tup.m_pk1) == 0); + if (g_opt.m_pk2len != 0) + CHK(g_opr->equal("PK2", tup.m_pk2) == 0); + CHK(getBlobHandles(g_opr) == 0); + + CHK(g_bh1->getValue(tup.m_blob1.m_buf, tup.m_blob1.m_len) == 0); + CHK(g_con->execute(Commit) == 0); + Uint64 len= ~0; + CHK(g_bh1->getLength(len) == 0 && len == tup.m_blob1.m_len); + tup.m_blob1.m_buf[offset]^= 0xff; + CHK(memcmp(tup.m_blob1.m_buf, tup.m_blob1.m_val, tup.m_blob1.m_len) == 0); + g_ndb->closeTransaction(g_con); + } + return 0; } + +struct bug27370_data { + Ndb *m_ndb; + char m_current_write_value; + char *m_writebuf; + Uint32 m_blob1_size; + Uint32 m_pk1; + char m_pk2[g_max_pk2len + 1]; + bool m_thread_stop; +}; + +void *bugtest_27370_thread(void *arg) +{ + bug27370_data *data= (bug27370_data *)arg; + + while (!data->m_thread_stop) + { + memset(data->m_writebuf, data->m_current_write_value, data->m_blob1_size); + data->m_current_write_value++; + + NdbConnection *con; + if ((con= data->m_ndb->startTransaction()) == 0) + return (void *)"Failed to create transaction"; + NdbOperation *opr; + if ((opr= con->getNdbOperation(g_opt.m_tname)) == 0) + return (void *)"Failed to create operation"; + if (opr->writeTuple() != 0) + return (void *)"writeTuple() failed"; + if (opr->equal("PK1", data->m_pk1) != 0) + return (void *)"equal(PK1) failed"; + if (g_opt.m_pk2len != 0) + if (opr->equal("PK2", data->m_pk2) != 0) + return (void *)"equal(PK2) failed"; + NdbBlob *bh; + if ((bh= opr->getBlobHandle("BL1")) == 0) + return (void *)"getBlobHandle() failed"; + if (bh->setValue(data->m_writebuf, data->m_blob1_size) != 0) + return (void *)"setValue() failed"; + if (con->execute(Commit, AbortOnError, 1) != 0) + return (void *)"execute() failed"; + data->m_ndb->closeTransaction(con); + } + + return NULL; // Success +} + static int -bugtest_3333() +bugtest_27370() { + DBG("bug test 27370 - Potential inconsistent blob reads for ReadCommitted reads"); + + bug27370_data data; + + data.m_ndb= new Ndb(g_ncc, "TEST_DB"); + CHK(data.m_ndb->init(20) == 0); + CHK(data.m_ndb->waitUntilReady() == 0); + + data.m_current_write_value= 0; + data.m_blob1_size= g_opt.m_blob1.m_inline + 10 * g_opt.m_blob1.m_partsize; + CHK((data.m_writebuf= new char [data.m_blob1_size]) != 0); + data.m_pk1= 27370; + memset(data.m_pk2, 'x', g_max_pk2len); + data.m_pk2[g_max_pk2len]= '\0'; + data.m_thread_stop= false; + + memset(data.m_writebuf, data.m_current_write_value, data.m_blob1_size); + data.m_current_write_value++; + + CHK((g_con= g_ndb->startTransaction()) != 0); + CHK((g_opr= g_con->getNdbOperation(g_opt.m_tname)) != 0); + CHK(g_opr->writeTuple() == 0); + CHK(g_opr->equal("PK1", data.m_pk1) == 0); + if (g_opt.m_pk2len != 0) + CHK(g_opr->equal("PK2", data.m_pk2) == 0); + CHK((g_bh1= g_opr->getBlobHandle("BL1")) != 0); + CHK(g_bh1->setValue(data.m_writebuf, data.m_blob1_size) == 0); + CHK(g_con->execute(Commit) == 0); + g_ndb->closeTransaction(g_con); + g_con= NULL; + + pthread_t thread_handle; + CHK(pthread_create(&thread_handle, NULL, bugtest_27370_thread, &data) == 0); + + DBG("bug test 27370 - PK blob reads"); + Uint32 seen_updates= 0; + while (seen_updates < 50) + { 
+ CHK((g_con= g_ndb->startTransaction()) != 0); + CHK((g_opr= g_con->getNdbOperation(g_opt.m_tname)) != 0); + CHK(g_opr->readTuple(NdbOperation::LM_CommittedRead) == 0); + CHK(g_opr->equal("PK1", data.m_pk1) == 0); + if (g_opt.m_pk2len != 0) + CHK(g_opr->equal("PK2", data.m_pk2) == 0); + CHK((g_bh1= g_opr->getBlobHandle("BL1")) != 0); + CHK(g_con->execute(NoCommit, AbortOnError, 1) == 0); + + const Uint32 loop_max= 10; + char read_char; + char original_read_char= 0; + Uint32 readloop; + for (readloop= 0;; readloop++) + { + if (readloop > 0) + { + if (readloop > 1) + { + /* Compare against first read. */ + CHK(read_char == original_read_char); + } + else + { + /* + We count the number of times we see the other thread had the + chance to update, so that we can be sure it had the opportunity + to run a reasonable number of times before we stop. + */ + if (original_read_char != read_char) + seen_updates++; + original_read_char= read_char; + } + } + if (readloop > loop_max) + break; + Uint32 readSize= 1; + CHK(g_bh1->setPos(urandom(data.m_blob1_size)) == 0); + CHK(g_bh1->readData(&read_char, readSize) == 0); + CHK(readSize == 1); + ExecType commitType= readloop == loop_max ? Commit : NoCommit; + CHK(g_con->execute(commitType, AbortOnError, 1) == 0); + } + g_ndb->closeTransaction(g_con); + g_con= NULL; + } + + DBG("bug test 27370 - table scan blob reads"); + seen_updates= 0; + while (seen_updates < 50) + { + CHK((g_con= g_ndb->startTransaction()) != 0); + CHK((g_ops= g_con->getNdbScanOperation(g_opt.m_tname)) != 0); + CHK(g_ops->readTuples(NdbOperation::LM_CommittedRead) == 0); + CHK((g_bh1= g_ops->getBlobHandle("BL1")) != 0); + CHK(g_con->execute(NoCommit, AbortOnError, 1) == 0); + CHK(g_ops->nextResult(true) == 0); + + const Uint32 loop_max= 10; + char read_char; + char original_read_char= 0; + Uint32 readloop; + for (readloop= 0;; readloop++) + { + if (readloop > 0) + { + if (readloop > 1) + { + /* Compare against first read. */ + CHK(read_char == original_read_char); + } + else + { + /* + We count the number of times we see the other thread had the + chance to update, so that we can be sure it had the opportunity + to run a reasonable number of times before we stop. + */ + if (original_read_char != read_char) + seen_updates++; + original_read_char= read_char; + } + } + if (readloop > loop_max) + break; + Uint32 readSize= 1; + CHK(g_bh1->setPos(urandom(data.m_blob1_size)) == 0); + CHK(g_bh1->readData(&read_char, readSize) == 0); + CHK(readSize == 1); + CHK(g_con->execute(NoCommit, AbortOnError, 1) == 0); + } + + CHK(g_ops->nextResult(true) == 1); + g_ndb->closeTransaction(g_con); + g_con= NULL; + } + + data.m_thread_stop= true; + void *thread_return; + CHK(pthread_join(thread_handle, &thread_return) == 0); + DBG("bug 27370 - thread return status: " << + (thread_return ? 
(char *)thread_return : "<null>")); + CHK(thread_return == 0); + + g_con= NULL; + g_opr= NULL; + g_bh1= NULL; return 0; } @@ -1822,7 +2088,9 @@ static struct { int m_bug; int (*m_test)(); } g_bugtest[] = { - { 4088, bugtest_4088 } + { 4088, bugtest_4088 }, + { 27018, bugtest_27018 }, + { 27370, bugtest_27370 } }; NDB_COMMAND(testOdbcDriver, "testBlobs", "testBlobs", "testBlobs", 65535) diff --git a/storage/ndb/test/ndbapi/testDict.cpp b/storage/ndb/test/ndbapi/testDict.cpp index 656b074ce8b..16b6e129605 100644 --- a/storage/ndb/test/ndbapi/testDict.cpp +++ b/storage/ndb/test/ndbapi/testDict.cpp @@ -23,6 +23,7 @@ #include <../../include/kernel/ndb_limits.h> #include <random.h> #include <NdbAutoPtr.hpp> +#include <NdbMixRestarter.hpp> #define CHECK(b) if (!(b)) { \ g_err << "ERR: "<< step->getName() \ @@ -321,7 +322,11 @@ int runCreateAndDropAtRandom(NDBT_Context* ctx, NDBT_Step* step) } i++; } - + + for (Uint32 i = 0; i<numTables; i++) + if (tabList[i]) + pDic->dropTable(NDBT_Tables::getTable(i)->getName()); + delete [] tabList; return result; } @@ -2106,7 +2111,857 @@ runDictOps(NDBT_Context* ctx, NDBT_Step* step) return result; } +int +runBug21755(NDBT_Context* ctx, NDBT_Step* step) +{ + char buf[256]; + NdbRestarter res; + NdbDictionary::Table pTab0 = * ctx->getTab(); + NdbDictionary::Table pTab1 = pTab0; + + if (res.getNumDbNodes() < 2) + return NDBT_OK; + + Ndb* pNdb = GETNDB(step); + NdbDictionary::Dictionary* pDic = pNdb->getDictionary(); + + if (pDic->createTable(pTab0)) + { + ndbout << pDic->getNdbError() << endl; + return NDBT_FAILED; + } + + NdbDictionary::Index idx0; + BaseString::snprintf(buf, sizeof(buf), "%s-idx", pTab0.getName()); + idx0.setName(buf); + idx0.setType(NdbDictionary::Index::OrderedIndex); + idx0.setTable(pTab0.getName()); + idx0.setStoredIndex(false); + for (Uint32 i = 0; i<pTab0.getNoOfColumns(); i++) + { + const NdbDictionary::Column * col = pTab0.getColumn(i); + if(col->getPrimaryKey()){ + idx0.addIndexColumn(col->getName()); + } + } + + if (pDic->createIndex(idx0)) + { + ndbout << pDic->getNdbError() << endl; + return NDBT_FAILED; + } + + BaseString::snprintf(buf, sizeof(buf), "%s-2", pTab1.getName()); + pTab1.setName(buf); + + if (pDic->createTable(pTab1)) + { + ndbout << pDic->getNdbError() << endl; + return NDBT_FAILED; + } + + { + HugoTransactions t0 (*pDic->getTable(pTab0.getName())); + t0.loadTable(pNdb, 1000); + } + + { + HugoTransactions t1 (*pDic->getTable(pTab1.getName())); + t1.loadTable(pNdb, 1000); + } + + int node = res.getRandomNotMasterNodeId(rand()); + res.restartOneDbNode(node, false, true, true); + + if (pDic->dropTable(pTab1.getName())) + { + ndbout << pDic->getNdbError() << endl; + return NDBT_FAILED; + } + + BaseString::snprintf(buf, sizeof(buf), "%s-idx2", pTab0.getName()); + idx0.setName(buf); + if (pDic->createIndex(idx0)) + { + ndbout << pDic->getNdbError() << endl; + return NDBT_FAILED; + } + + res.waitNodesNoStart(&node, 1); + res.startNodes(&node, 1); + + if (res.waitClusterStarted()) + { + return NDBT_FAILED; + } + + if (pDic->dropTable(pTab0.getName())) + { + ndbout << pDic->getNdbError() << endl; + return NDBT_FAILED; + } + + return NDBT_OK; +} + +static +int +create_tablespace(NdbDictionary::Dictionary* pDict, + const char * lgname, + const char * tsname, + const char * dfname) +{ + NdbDictionary::Tablespace ts; + ts.setName(tsname); + ts.setExtentSize(1024*1024); + ts.setDefaultLogfileGroup(lgname); + + if(pDict->createTablespace(ts) != 0) + { + g_err << "Failed to create tablespace:" + << endl << pDict->getNdbError() << 
endl; + return NDBT_FAILED; + } + + NdbDictionary::Datafile df; + df.setPath(dfname); + df.setSize(1*1024*1024); + df.setTablespace(tsname); + + if(pDict->createDatafile(df) != 0) + { + g_err << "Failed to create datafile:" + << endl << pDict->getNdbError() << endl; + return NDBT_FAILED; + } + return 0; +} + +int +runBug24631(NDBT_Context* ctx, NDBT_Step* step) +{ + char tsname[256]; + char dfname[256]; + char lgname[256]; + char ufname[256]; + NdbRestarter res; + + if (res.getNumDbNodes() < 2) + return NDBT_OK; + + Ndb* pNdb = GETNDB(step); + NdbDictionary::Dictionary* pDict = pNdb->getDictionary(); + + NdbDictionary::Dictionary::List list; + if (pDict->listObjects(list) == -1) + return NDBT_FAILED; + + const char * lgfound = 0; + + for (Uint32 i = 0; i<list.count; i++) + { + switch(list.elements[i].type){ + case NdbDictionary::Object::LogfileGroup: + lgfound = list.elements[i].name; + break; + default: + break; + } + if (lgfound) + break; + } + + if (lgfound == 0) + { + BaseString::snprintf(lgname, sizeof(lgname), "LG-%u", rand()); + NdbDictionary::LogfileGroup lg; + + lg.setName(lgname); + lg.setUndoBufferSize(8*1024*1024); + if(pDict->createLogfileGroup(lg) != 0) + { + g_err << "Failed to create logfilegroup:" + << endl << pDict->getNdbError() << endl; + return NDBT_FAILED; + } + + NdbDictionary::Undofile uf; + BaseString::snprintf(ufname, sizeof(ufname), "%s-%u", lgname, rand()); + uf.setPath(ufname); + uf.setSize(2*1024*1024); + uf.setLogfileGroup(lgname); + + if(pDict->createUndofile(uf) != 0) + { + g_err << "Failed to create undofile:" + << endl << pDict->getNdbError() << endl; + return NDBT_FAILED; + } + } + else + { + BaseString::snprintf(lgname, sizeof(lgname), "%s", lgfound); + } + + BaseString::snprintf(tsname, sizeof(tsname), "TS-%u", rand()); + BaseString::snprintf(dfname, sizeof(dfname), "%s-%u.dat", tsname, rand()); + + if (create_tablespace(pDict, lgname, tsname, dfname)) + return NDBT_FAILED; + + + int node = res.getRandomNotMasterNodeId(rand()); + res.restartOneDbNode(node, false, true, true); + NdbSleep_SecSleep(3); + + if (pDict->dropDatafile(pDict->getDatafile(0, dfname)) != 0) + { + g_err << "Failed to drop datafile: " << pDict->getNdbError() << endl; + return NDBT_FAILED; + } + + if (pDict->dropTablespace(pDict->getTablespace(tsname)) != 0) + { + g_err << "Failed to drop tablespace: " << pDict->getNdbError() << endl; + return NDBT_FAILED; + } + + if (res.waitNodesNoStart(&node, 1)) + return NDBT_FAILED; + + res.startNodes(&node, 1); + if (res.waitClusterStarted()) + return NDBT_FAILED; + + if (create_tablespace(pDict, lgname, tsname, dfname)) + return NDBT_FAILED; + + if (pDict->dropDatafile(pDict->getDatafile(0, dfname)) != 0) + { + g_err << "Failed to drop datafile: " << pDict->getNdbError() << endl; + return NDBT_FAILED; + } + + if (pDict->dropTablespace(pDict->getTablespace(tsname)) != 0) + { + g_err << "Failed to drop tablespace: " << pDict->getNdbError() << endl; + return NDBT_FAILED; + } + + if (lgfound == 0) + { + if (pDict->dropLogfileGroup(pDict->getLogfileGroup(lgname)) != 0) + return NDBT_FAILED; + } + + return NDBT_OK; +} + +struct RandSchemaOp +{ + struct Obj + { + BaseString m_name; + Uint32 m_type; + struct Obj* m_parent; + Vector<Obj*> m_dependant; + }; + + Vector<Obj*> m_objects; + + int schema_op(Ndb*); + int validate(Ndb*); + int cleanup(Ndb*); + + Obj* get_obj(Uint32 mask); + int create_table(Ndb*); + int create_index(Ndb*, Obj*); + int drop_obj(Ndb*, Obj*); + + void remove_obj(Obj*); +}; + +template class Vector<RandSchemaOp::Obj*>; + +int 
+RandSchemaOp::schema_op(Ndb* ndb) +{ + struct Obj* obj = 0; + Uint32 type = 0; +loop: + switch((rand() >> 16) & 3){ + case 0: + return create_table(ndb); + case 1: + if ((obj = get_obj(1 << NdbDictionary::Object::UserTable)) == 0) + goto loop; + return create_index(ndb, obj); + case 2: + type = (1 << NdbDictionary::Object::UserTable); + goto drop_object; + case 3: + type = + (1 << NdbDictionary::Object::UniqueHashIndex) | + (1 << NdbDictionary::Object::OrderedIndex); + goto drop_object; + default: + goto loop; + } + +drop_object: + if ((obj = get_obj(type)) == 0) + goto loop; + return drop_obj(ndb, obj); +} + +RandSchemaOp::Obj* +RandSchemaOp::get_obj(Uint32 mask) +{ + Vector<Obj*> tmp; + for (Uint32 i = 0; i<m_objects.size(); i++) + { + if ((1 << m_objects[i]->m_type) & mask) + tmp.push_back(m_objects[i]); + } + + if (tmp.size()) + { + return tmp[rand()%tmp.size()]; + } + return 0; +} + +int +RandSchemaOp::create_table(Ndb* ndb) +{ + int numTables = NDBT_Tables::getNumTables(); + int num = myRandom48(numTables); + NdbDictionary::Table pTab = * NDBT_Tables::getTable(num); + + NdbDictionary::Dictionary* pDict = ndb->getDictionary(); + + if (pDict->getTable(pTab.getName())) + { + char buf[100]; + BaseString::snprintf(buf, sizeof(buf), "%s-%d", + pTab.getName(), rand()); + pTab.setName(buf); + if (pDict->createTable(pTab)) + return NDBT_FAILED; + } + else + { + if (NDBT_Tables::createTable(ndb, pTab.getName())) + { + return NDBT_FAILED; + } + } + + ndbout_c("create table %s", pTab.getName()); + const NdbDictionary::Table* tab2 = pDict->getTable(pTab.getName()); + HugoTransactions trans(*tab2); + trans.loadTable(ndb, 1000); + + Obj *obj = new Obj; + obj->m_name.assign(pTab.getName()); + obj->m_type = NdbDictionary::Object::UserTable; + obj->m_parent = 0; + m_objects.push_back(obj); + + return NDBT_OK; +} + +int +RandSchemaOp::create_index(Ndb* ndb, Obj* tab) +{ + NdbDictionary::Dictionary* pDict = ndb->getDictionary(); + const NdbDictionary::Table * pTab = pDict->getTable(tab->m_name.c_str()); + + if (pTab == 0) + { + return NDBT_FAILED; + } + + bool ordered = (rand() >> 16) & 1; + bool stored = (rand() >> 16) & 1; + + Uint32 type = ordered ? + NdbDictionary::Index::OrderedIndex : + NdbDictionary::Index::UniqueHashIndex; + + char buf[255]; + BaseString::snprintf(buf, sizeof(buf), "%s-%s", + pTab->getName(), + ordered ? "OI" : "UI"); + + if (pDict->getIndex(buf, pTab->getName())) + { + // Index exists...let it be ok + return NDBT_OK; + } + + ndbout_c("create index %s", buf); + NdbDictionary::Index idx0; + idx0.setName(buf); + idx0.setType((NdbDictionary::Index::Type)type); + idx0.setTable(pTab->getName()); + idx0.setStoredIndex(ordered ? 
false : stored); + + for (Uint32 i = 0; i<pTab->getNoOfColumns(); i++) + { + if (pTab->getColumn(i)->getPrimaryKey()) + idx0.addColumn(pTab->getColumn(i)->getName()); + } + if (pDict->createIndex(idx0)) + { + ndbout << pDict->getNdbError() << endl; + return NDBT_FAILED; + } + Obj *obj = new Obj; + obj->m_name.assign(buf); + obj->m_type = type; + obj->m_parent = tab; + m_objects.push_back(obj); + + tab->m_dependant.push_back(obj); + return NDBT_OK; +} + +int +RandSchemaOp::drop_obj(Ndb* ndb, Obj* obj) +{ + NdbDictionary::Dictionary* pDict = ndb->getDictionary(); + + if (obj->m_type == NdbDictionary::Object::UserTable) + { + ndbout_c("drop table %s", obj->m_name.c_str()); + /** + * Drop of table automatically drops all indexes + */ + if (pDict->dropTable(obj->m_name.c_str())) + { + return NDBT_FAILED; + } + while(obj->m_dependant.size()) + { + remove_obj(obj->m_dependant[0]); + } + remove_obj(obj); + } + else if (obj->m_type == NdbDictionary::Object::UniqueHashIndex || + obj->m_type == NdbDictionary::Object::OrderedIndex) + { + ndbout_c("drop index %s", obj->m_name.c_str()); + if (pDict->dropIndex(obj->m_name.c_str(), + obj->m_parent->m_name.c_str())) + { + return NDBT_FAILED; + } + remove_obj(obj); + } + return NDBT_OK; +} + +void +RandSchemaOp::remove_obj(Obj* obj) +{ + Uint32 i; + if (obj->m_parent) + { + bool found = false; + for (i = 0; i<obj->m_parent->m_dependant.size(); i++) + { + if (obj->m_parent->m_dependant[i] == obj) + { + found = true; + obj->m_parent->m_dependant.erase(i); + break; + } + } + assert(found); + } + + { + bool found = false; + for (i = 0; i<m_objects.size(); i++) + { + if (m_objects[i] == obj) + { + found = true; + m_objects.erase(i); + break; + } + } + assert(found); + } + delete obj; +} + +int +RandSchemaOp::validate(Ndb* ndb) +{ + NdbDictionary::Dictionary* pDict = ndb->getDictionary(); + for (Uint32 i = 0; i<m_objects.size(); i++) + { + if (m_objects[i]->m_type == NdbDictionary::Object::UserTable) + { + const NdbDictionary::Table* tab2 = + pDict->getTable(m_objects[i]->m_name.c_str()); + HugoTransactions trans(*tab2); + trans.scanUpdateRecords(ndb, 1000); + trans.clearTable(ndb); + trans.loadTable(ndb, 1000); + } + } + + return NDBT_OK; +} + +/* + SystemTable = 1, ///< System table + UserTable = 2, ///< User table (may be temporary) + UniqueHashIndex = 3, ///< Unique un-ordered hash index + OrderedIndex = 6, ///< Non-unique ordered index + HashIndexTrigger = 7, ///< Index maintenance, internal + IndexTrigger = 8, ///< Index maintenance, internal + SubscriptionTrigger = 9,///< Backup or replication, internal + ReadOnlyConstraint = 10,///< Trigger, internal + Tablespace = 20, ///< Tablespace + LogfileGroup = 21, ///< Logfile group + Datafile = 22, ///< Datafile + Undofile = 23 ///< Undofile +*/ + +int +RandSchemaOp::cleanup(Ndb* ndb) +{ + Int32 i; + for (i = m_objects.size() - 1; i >= 0; i--) + { + switch(m_objects[i]->m_type){ + case NdbDictionary::Object::UniqueHashIndex: + case NdbDictionary::Object::OrderedIndex: + if (drop_obj(ndb, m_objects[i])) + return NDBT_FAILED; + + break; + default: + break; + } + } + + for (i = m_objects.size() - 1; i >= 0; i--) + { + switch(m_objects[i]->m_type){ + case NdbDictionary::Object::UserTable: + if (drop_obj(ndb, m_objects[i])) + return NDBT_FAILED; + break; + default: + break; + } + } + + assert(m_objects.size() == 0); + return NDBT_OK; +} + +int +runDictRestart(NDBT_Context* ctx, NDBT_Step* step) +{ + Ndb* pNdb = GETNDB(step); + int loops = ctx->getNumLoops(); + + NdbMixRestarter res; + + RandSchemaOp dict; + if 
(res.getNumDbNodes() < 2) + return NDBT_OK; + + if (res.init(ctx, step)) + return NDBT_FAILED; + + for (Uint32 i = 0; i<loops; i++) + { + for (Uint32 j = 0; j<10; j++) + if (dict.schema_op(pNdb)) + return NDBT_FAILED; + + if (res.dostep(ctx, step)) + return NDBT_FAILED; + + if (dict.validate(pNdb)) + return NDBT_FAILED; + } + + if (res.finish(ctx, step)) + return NDBT_FAILED; + + if (dict.validate(pNdb)) + return NDBT_FAILED; + + if (dict.cleanup(pNdb)) + return NDBT_FAILED; + + return NDBT_OK; +} + +int +runBug29501(NDBT_Context* ctx, NDBT_Step* step) { + NdbRestarter res; + NdbDictionary::LogfileGroup lg; + lg.setName("DEFAULT-LG"); + lg.setUndoBufferSize(8*1024*1024); + + if (res.getNumDbNodes() < 2) + return NDBT_OK; + + Ndb* pNdb = GETNDB(step); + NdbDictionary::Dictionary* pDict = pNdb->getDictionary(); + + int node = res.getRandomNotMasterNodeId(rand()); + res.restartOneDbNode(node, true, true, false); + + if(pDict->createLogfileGroup(lg) != 0){ + g_err << "Failed to create logfilegroup:" + << endl << pDict->getNdbError() << endl; + return NDBT_FAILED; + } + + NdbDictionary::Undofile uf; + uf.setPath("undofile01.dat"); + uf.setSize(5*1024*1024); + uf.setLogfileGroup("DEFAULT-LG"); + + if(pDict->createUndofile(uf) != 0){ + g_err << "Failed to create undofile:" + << endl << pDict->getNdbError() << endl; + return NDBT_FAILED; + } + + res.waitNodesNoStart(&node, 1); + res.startNodes(&node, 1); + + if (res.waitClusterStarted()){ + g_err << "Node restart failed" + << endl << pDict->getNdbError() << endl; + return NDBT_FAILED; + } + + if (pDict->dropLogfileGroup(pDict->getLogfileGroup(lg.getName())) != 0){ + g_err << "Drop of LFG Failed" + << endl << pDict->getNdbError() << endl; + return NDBT_FAILED; + } + + return NDBT_OK; +} + +int +runDropDDObjects(NDBT_Context* ctx, NDBT_Step* step){ + //Purpose is to drop all tables, data files, Table spaces and LFG's + Uint32 i = 0; + + Ndb* pNdb = GETNDB(step); + NdbDictionary::Dictionary* pDict = pNdb->getDictionary(); + + NdbDictionary::Dictionary::List list; + if (pDict->listObjects(list) == -1) + return NDBT_FAILED; + + //Search the list and drop all tables found + const char * tableFound = 0; + for (i = 0; i < list.count; i++){ + switch(list.elements[i].type){ + case NdbDictionary::Object::UserTable: + tableFound = list.elements[i].name; + if(tableFound != 0){ + if(pDict->dropTable(tableFound) != 0){ + g_err << "Failed to drop table: " << pDict->getNdbError() << endl; + return NDBT_FAILED; + } + } + tableFound = 0; + break; + default: + break; + } + } + + //Search the list and drop all data file found + const char * dfFound = 0; + for (i = 0; i < list.count; i++){ + switch(list.elements[i].type){ + case NdbDictionary::Object::Datafile: + dfFound = list.elements[i].name; + if(dfFound != 0){ + if(pDict->dropDatafile(pDict->getDatafile(0, dfFound)) != 0){ + g_err << "Failed to drop datafile: " << pDict->getNdbError() << endl; + return NDBT_FAILED; + } + } + dfFound = 0; + break; + default: + break; + } + } + + //Search the list and drop all Table Spaces Found + const char * tsFound = 0; + for (i = 0; i <list.count; i++){ + switch(list.elements[i].type){ + case NdbDictionary::Object::Tablespace: + tsFound = list.elements[i].name; + if(tsFound != 0){ + if(pDict->dropTablespace(pDict->getTablespace(tsFound)) != 0){ + g_err << "Failed to drop tablespace: " << pDict->getNdbError() << endl; + return NDBT_FAILED; + } + } + tsFound = 0; + break; + default: + break; + } + } + + //Search the list and drop all LFG Found + //Currently only 1 LFG is supported, but written for the future + //when more than one is supported. + const char * lgFound = 0; + for (i = 0; i < list.count; i++){ + switch(list.elements[i].type){ + case NdbDictionary::Object::LogfileGroup: + lgFound = list.elements[i].name; + if(lgFound != 0){ + if (pDict->dropLogfileGroup(pDict->getLogfileGroup(lgFound)) != 0){ + g_err << "Failed to drop logfile group: " << pDict->getNdbError() << endl; + return NDBT_FAILED; + } + } + lgFound = 0; + break; + default: + break; + } + } + + return NDBT_OK; +} + +int +runWaitStarted(NDBT_Context* ctx, NDBT_Step* step){ + + NdbRestarter restarter; + restarter.waitClusterStarted(300); + + NdbSleep_SecSleep(3); + return NDBT_OK; +} + +int +testDropDDObjectsSetup(NDBT_Context* ctx, NDBT_Step* step){ + //Purpose is to setup to test DropDDObjects + char tsname[256]; + char dfname[256]; + + Ndb* pNdb = GETNDB(step); + NdbDictionary::Dictionary* pDict = pNdb->getDictionary(); + + NdbDictionary::LogfileGroup lg; + lg.setName("DEFAULT-LG"); + lg.setUndoBufferSize(8*1024*1024); + + + if(pDict->createLogfileGroup(lg) != 0){ + g_err << "Failed to create logfilegroup:" + << endl << pDict->getNdbError() << endl; + return NDBT_FAILED; + } + + NdbDictionary::Undofile uf; + uf.setPath("undofile01.dat"); + uf.setSize(5*1024*1024); + uf.setLogfileGroup("DEFAULT-LG"); + + if(pDict->createUndofile(uf) != 0){ + g_err << "Failed to create undofile:" + << endl << pDict->getNdbError() << endl; + return NDBT_FAILED; + } + + BaseString::snprintf(tsname, sizeof(tsname), "TS-%u", rand()); + BaseString::snprintf(dfname, sizeof(dfname), "%s-%u.dat", tsname, rand()); + + if (create_tablespace(pDict, lg.getName(), tsname, dfname)){ + g_err << "Failed to create tablespace:" + << endl << pDict->getNdbError() << endl; + return NDBT_FAILED; + } + + return NDBT_OK; +} + +int +DropDDObjectsVerify(NDBT_Context* ctx, NDBT_Step* step){ + //Purpose is to verify test DropDDObjects worked + Uint32 i = 0; + + Ndb* pNdb = GETNDB(step); + NdbDictionary::Dictionary* pDict = pNdb->getDictionary(); + + NdbDictionary::Dictionary::List list; + if (pDict->listObjects(list) == -1) + return NDBT_FAILED; + + bool ddFound = false; + for (i = 0; i <list.count; i++){ + switch(list.elements[i].type){ + case NdbDictionary::Object::Tablespace: + ddFound = true; + break; + case NdbDictionary::Object::LogfileGroup: + ddFound = true; + break; + default: + break; + } + if(ddFound == true){ + g_err << "DropDDObjects Failed: DD object found" + << endl; + return NDBT_FAILED; + } + } + return NDBT_OK; +} + NDBT_TESTSUITE(testDict); +TESTCASE("testDropDDObjects", + "* 1. start cluster\n" + "* 2. Create LFG\n" + "* 3. create TS\n" + "* 4. run DropDDObjects\n" + "* 5. Verify DropDDObjects worked\n"){ +INITIALIZER(runWaitStarted); +INITIALIZER(runDropDDObjects); +INITIALIZER(testDropDDObjectsSetup); +STEP(runDropDDObjects); +FINALIZER(DropDDObjectsVerify); +} + +TESTCASE("Bug29501", + "* 1. start cluster\n" + "* 2. Restart 1 node -abort -nostart\n" + "* 3. create LFG\n" + "* 4. Restart data node\n" + "* 5. Restart 1 node -nostart\n" + "* 6. 
Drop LFG\n"){ +INITIALIZER(runWaitStarted); +INITIALIZER(runDropDDObjects); +STEP(runBug29501); +FINALIZER(runDropDDObjects); +} TESTCASE("CreateAndDrop", "Try to create and drop the table loop number of times\n"){ INITIALIZER(runCreateAndDrop); @@ -2252,7 +3107,18 @@ TESTCASE("Restart_NR2", STEP(runRestarts); STEP(runDictOps); } - +TESTCASE("Bug21755", + ""){ + INITIALIZER(runBug21755); +} +TESTCASE("DictRestart", + ""){ + INITIALIZER(runDictRestart); +} +TESTCASE("Bug24631", + ""){ + INITIALIZER(runBug24631); +} NDBT_TESTSUITE_END(testDict); int main(int argc, const char** argv){ diff --git a/storage/ndb/test/ndbapi/testIndex.cpp b/storage/ndb/test/ndbapi/testIndex.cpp index e52aafa296b..bd9ff7ac607 100644 --- a/storage/ndb/test/ndbapi/testIndex.cpp +++ b/storage/ndb/test/ndbapi/testIndex.cpp @@ -1298,6 +1298,103 @@ runBug25059(NDBT_Context* ctx, NDBT_Step* step) return res; } +int tcSaveINDX_test(NDBT_Context* ctx, NDBT_Step* step, int inject_err) +{ + int result= NDBT_OK; + Ndb* pNdb = GETNDB(step); + NdbDictionary::Dictionary * dict = pNdb->getDictionary(); + const NdbDictionary::Index * idx = dict->getIndex(pkIdxName, + ctx->getTab()->getName()); + + HugoOperations ops(*ctx->getTab(), idx); + + g_err << "Using INDEX: " << pkIdxName << endl; + + NdbRestarter restarter; + + int loops = ctx->getNumLoops(); + const int rows = ctx->getNumRecords(); + const int batchsize = ctx->getProperty("BatchSize", 1); + + for(int bs=1; bs < loops; bs++) + { + int c= 0; + while (c++ < loops) + { + g_err << "BS " << bs << " LOOP #" << c << endl; + + g_err << "inserting error on op#" << c << endl; + + CHECK(ops.startTransaction(pNdb) == 0); + for(int i=1;i<=c;i++) + { + if(i==c) + { + if(restarter.insertErrorInAllNodes(inject_err)!=0) + { + g_err << "**** FAILED to insert error" << endl; + result= NDBT_FAILED; + break; + } + } + CHECK(ops.indexReadRecords(pNdb, pkIdxName, i,false,1) == 0); + if(i%bs==0 || i==c) + { + if(i<c) + { + if(ops.execute_NoCommit(pNdb, AO_IgnoreError)!=NDBT_OK) + { + g_err << "**** executeNoCommit should have succeeded" << endl; + result= NDBT_FAILED; + } + } + else + { + if(ops.execute_NoCommit(pNdb, AO_IgnoreError)!=289) + { + g_err << "**** executeNoCommit should have failed with 289" + << endl; + result= NDBT_FAILED; + } + g_err << "NdbError.code= " << + ops.getTransaction()->getNdbError().code << endl; + break; + } + } + } + + CHECK(ops.closeTransaction(pNdb) == 0); + + if(restarter.insertErrorInAllNodes(0) != 0) + { + g_err << "**** Failed to error insert(0)" << endl; + return NDBT_FAILED; + } + + CHECK(ops.startTransaction(pNdb) == 0); + if (ops.indexReadRecords(pNdb, pkIdxName,0,0,rows) != 0){ + g_err << "**** Index read failed" << endl; + return NDBT_FAILED; + } + CHECK(ops.closeTransaction(pNdb) == 0); + } + } + + return result; +} + +int +runBug28804(NDBT_Context* ctx, NDBT_Step* step) +{ + return tcSaveINDX_test(ctx, step, 8052); +} + +int +runBug28804_ATTRINFO(NDBT_Context* ctx, NDBT_Step* step) +{ + return tcSaveINDX_test(ctx, step, 8051); +} + NDBT_TESTSUITE(testIndex); TESTCASE("CreateAll", "Test that we can create all various indexes on each table\n" @@ -1629,6 +1726,27 @@ TESTCASE("Bug25059", STEP(runBug25059); FINALIZER(createPkIndex_Drop); } +TESTCASE("Bug28804", + "Test behaviour on out of TransactionBufferMemory for index lookup"){ + TC_PROPERTY("LoggedIndexes", (unsigned)0); + INITIALIZER(runClearTable); + INITIALIZER(createPkIndex); + INITIALIZER(runLoadTable); + STEP(runBug28804); + FINALIZER(createPkIndex_Drop); + FINALIZER(runClearTable); +} 
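Both Bug28804 test cases drive the same tcSaveINDX_test() body shown above, differing only in the injected error code: 8052 makes TC fail in saveINDXKEYINFO, 8051 in saveINDXATTRINFO. The following is a minimal sketch of the shared inject-then-expect-289 pattern, using only helpers that already appear in this patch (CHECK, HugoOperations, NdbRestarter, pkIdxName, AO_IgnoreError); the single-read batch is illustrative, whereas the real test loops over batch sizes:

  // Sketch: force TC out of TransactionBufferMemory for one index read,
  // check that execute reports error 289, then clear the injected error.
  NdbRestarter restarter;
  HugoOperations ops(*ctx->getTab(), idx);
  CHECK(ops.startTransaction(pNdb) == 0);
  // Inject the error in all data nodes, then issue one index read.
  CHECK(restarter.insertErrorInAllNodes(8052) == 0);
  CHECK(ops.indexReadRecords(pNdb, pkIdxName, 1, false, 1) == 0);
  // TC should run out of TransactionBufferMemory and return error 289
  // ("Out of transaction buffer memory in TC", added in ndberror.c above).
  CHECK(ops.execute_NoCommit(pNdb, AO_IgnoreError) == 289);
  CHECK(ops.closeTransaction(pNdb) == 0);
  // Clear the injected error so subsequent reads succeed again.
  CHECK(restarter.insertErrorInAllNodes(0) == 0);
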
+TESTCASE("Bug28804_ATTRINFO", + "Test behaviour on out of TransactionBufferMemory for index lookup" + " in saveINDXATTRINFO"){ + TC_PROPERTY("LoggedIndexes", (unsigned)0); + INITIALIZER(runClearTable); + INITIALIZER(createPkIndex); + INITIALIZER(runLoadTable); + STEP(runBug28804_ATTRINFO); + FINALIZER(createPkIndex_Drop); + FINALIZER(runClearTable); +} NDBT_TESTSUITE_END(testIndex); int main(int argc, const char** argv){ diff --git a/storage/ndb/test/ndbapi/testIndexStat.cpp b/storage/ndb/test/ndbapi/testIndexStat.cpp index 7c69361a732..0e15cdd80d1 100644 --- a/storage/ndb/test/ndbapi/testIndexStat.cpp +++ b/storage/ndb/test/ndbapi/testIndexStat.cpp @@ -1210,8 +1210,8 @@ struct V_rir { static double data(const Range& range) { return (double)range.errpct; } }; -template static void computestat<Key, V_rpk>(Stat& stat); -template static void computestat<Range, V_rir>(Stat& stat); +template void computestat<Key, V_rpk>(Stat& stat); +template void computestat<Range, V_rir>(Stat& stat); static Stat g_stat_rpk; // summaries over loops static Stat g_stat_rir; @@ -1297,43 +1297,43 @@ my_long_options[] = { NDB_STD_OPTS("testIndexStat"), { "loglevel", 1001, "Logging level in this program 0-3 (default 0)", - (gptr*)&g_opts.loglevel, (gptr*)&g_opts.loglevel, 0, + (uchar **)&g_opts.loglevel, (uchar **)&g_opts.loglevel, 0, GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { "seed", 1002, "Random seed (0=loop number, default -1=random)", - (gptr*)&g_opts.seed, (gptr*)&g_opts.seed, 0, + (uchar **)&g_opts.seed, (uchar **)&g_opts.seed, 0, GET_INT, REQUIRED_ARG, -1, 0, 0, 0, 0, 0 }, { "loop", 1003, "Number of test loops (default 1, 0=forever)", - (gptr*)&g_opts.loop, (gptr*)&g_opts.loop, 0, + (uchar **)&g_opts.loop, (uchar **)&g_opts.loop, 0, GET_INT, REQUIRED_ARG, 1, 0, 0, 0, 0, 0 }, { "rows", 1004, "Number of rows (default 100000)", - (gptr*)&g_opts.rows, (gptr*)&g_opts.rows, 0, + (uchar **)&g_opts.rows, (uchar **)&g_opts.rows, 0, GET_UINT, REQUIRED_ARG, 100000, 0, 0, 0, 0, 0 }, { "ops", 1005, "Number of index scans per loop (default 1000)", - (gptr*)&g_opts.ops, (gptr*)&g_opts.ops, 0, + (uchar **)&g_opts.ops, (uchar **)&g_opts.ops, 0, GET_UINT, REQUIRED_ARG, 1000, 0, 0, 0, 0, 0 }, { "dupkeys", 1006, "Pct records per key (min 100, default 1000)", - (gptr*)&g_opts.dupkeys, (gptr*)&g_opts.dupkeys, 0, + (uchar **)&g_opts.dupkeys, (uchar **)&g_opts.dupkeys, 0, GET_UINT, REQUIRED_ARG, 1000, 0, 0, 0, 0, 0 }, { "scanpct", 1007, "Preferred max pct of total rows per scan (default 5)", - (gptr*)&g_opts.scanpct, (gptr*)&g_opts.scanpct, 0, + (uchar **)&g_opts.scanpct, (uchar **)&g_opts.scanpct, 0, GET_UINT, REQUIRED_ARG, 5, 0, 0, 0, 0, 0 }, { "nullkeys", 1008, "Pct nulls in each key attribute (default 10)", - (gptr*)&g_opts.nullkeys, (gptr*)&g_opts.nullkeys, 0, + (uchar **)&g_opts.nullkeys, (uchar **)&g_opts.nullkeys, 0, GET_UINT, REQUIRED_ARG, 10, 0, 0, 0, 0, 0 }, { "eqscans", 1009, "Pct scans for partial/full equality (default 50)", - (gptr*)&g_opts.eqscans, (gptr*)&g_opts.eqscans, 0, + (uchar **)&g_opts.eqscans, (uchar **)&g_opts.eqscans, 0, GET_UINT, REQUIRED_ARG, 50, 0, 0, 0, 0, 0 }, { "dupscans", 1010, "Pct scans using same bounds (default 10)", - (gptr*)&g_opts.dupscans, (gptr*)&g_opts.dupscans, 0, + (uchar **)&g_opts.dupscans, (uchar **)&g_opts.dupscans, 0, GET_UINT, REQUIRED_ARG, 10, 0, 0, 0, 0, 0 }, { "keeptable", 1011, "Use existing table and data if any and do not drop", - (gptr*)&g_opts.keeptable, (gptr*)&g_opts.keeptable, 0, + (uchar **)&g_opts.keeptable, (uchar **)&g_opts.keeptable, 0, GET_BOOL, NO_ARG, 0, 0, 
0, 0, 0, 0 }, { "no-extra-checks", 1012, "Omit expensive consistency checks", - (gptr*)&g_opts.nochecks, (gptr*)&g_opts.nochecks, 0, + (uchar **)&g_opts.nochecks, (uchar **)&g_opts.nochecks, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "abort-on-error", 1013, "Dump core on any error", - (gptr*)&g_opts.abort, (gptr*)&g_opts.abort, 0, + (uchar **)&g_opts.abort, (uchar **)&g_opts.abort, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, diff --git a/storage/ndb/test/ndbapi/testMgm.cpp b/storage/ndb/test/ndbapi/testMgm.cpp index c4301165497..e43972c8c29 100644 --- a/storage/ndb/test/ndbapi/testMgm.cpp +++ b/storage/ndb/test/ndbapi/testMgm.cpp @@ -22,6 +22,9 @@ #include <random.h> #include <mgmapi.h> #include <mgmapi_debug.h> +#include <ndb_logevent.h> +#include <InputStream.hpp> +#include <signaldata/EventReport.hpp> int runLoadTable(NDBT_Context* ctx, NDBT_Step* step){ @@ -191,6 +194,8 @@ int runTestApiSession(NDBT_Context* ctx, NDBT_Step* step) ndb_mgm_set_connectstring(h, mgm); ndb_mgm_connect(h,0,0,0); + NdbSleep_SecSleep(1); + if(ndb_mgm_get_session(h,session_id,&sess,&slen)) { ndbout << "Failed, session still exists" << endl; @@ -207,6 +212,580 @@ int runTestApiSession(NDBT_Context* ctx, NDBT_Step* step) } } +int runTestApiConnectTimeout(NDBT_Context* ctx, NDBT_Step* step) +{ + char *mgm= ctx->getRemoteMgm(); + int result= NDBT_FAILED; + int cc= 0; + int mgmd_nodeid= 0; + ndb_mgm_reply reply; + + NdbMgmHandle h; + h= ndb_mgm_create_handle(); + ndb_mgm_set_connectstring(h, mgm); + + ndbout << "TEST connect timeout" << endl; + + ndb_mgm_set_timeout(h, 3000); + + struct timeval tstart, tend; + int secs; + timerclear(&tstart); + timerclear(&tend); + gettimeofday(&tstart,NULL); + + ndb_mgm_connect(h,0,0,0); + + gettimeofday(&tend,NULL); + + secs= tend.tv_sec - tstart.tv_sec; + ndbout << "Took about: " << secs <<" seconds"<<endl; + + if(secs < 4) + result= NDBT_OK; + else + goto done; + + ndb_mgm_set_connectstring(h, mgm); + + ndbout << "TEST connect timeout" << endl; + + ndb_mgm_destroy_handle(&h); + + h= ndb_mgm_create_handle(); + ndb_mgm_set_connectstring(h, "1.1.1.1"); + + ndbout << "TEST connect timeout (invalid host)" << endl; + + ndb_mgm_set_timeout(h, 3000); + + timerclear(&tstart); + timerclear(&tend); + gettimeofday(&tstart,NULL); + + ndb_mgm_connect(h,0,0,0); + + gettimeofday(&tend,NULL); + + secs= tend.tv_sec - tstart.tv_sec; + ndbout << "Took about: " << secs <<" seconds"<<endl; + + if(secs < 4) + result= NDBT_OK; + else + result= NDBT_FAILED; + +done: + ndb_mgm_disconnect(h); + ndb_mgm_destroy_handle(&h); + + return result; +} + +int runTestApiTimeoutBasic(NDBT_Context* ctx, NDBT_Step* step) +{ + char *mgm= ctx->getRemoteMgm(); + int result= NDBT_FAILED; + int cc= 0; + int mgmd_nodeid= 0; + ndb_mgm_reply reply; + + NdbMgmHandle h; + h= ndb_mgm_create_handle(); + ndb_mgm_set_connectstring(h, mgm); + + ndbout << "TEST timeout check_connection" << endl; + int errs[] = { 1, 2, 3, -1}; + + for(int error_ins_no=0; errs[error_ins_no]!=-1; error_ins_no++) + { + int error_ins= errs[error_ins_no]; + ndbout << "trying error " << error_ins << endl; + ndb_mgm_connect(h,0,0,0); + + if(ndb_mgm_check_connection(h) < 0) + { + result= NDBT_FAILED; + goto done; + } + + mgmd_nodeid= ndb_mgm_get_mgmd_nodeid(h); + if(mgmd_nodeid==0) + { + ndbout << "Failed to get mgmd node id to insert error" << endl; + result= NDBT_FAILED; + goto done; + } + + reply.return_code= 0; + + if(ndb_mgm_insert_error(h, mgmd_nodeid, error_ins, &reply)< 0) + { + ndbout << "failed to insert error " << endl; + result= 
NDBT_FAILED; + goto done; + } + + ndb_mgm_set_timeout(h,2500); + + cc= ndb_mgm_check_connection(h); + if(cc < 0) + result= NDBT_OK; + else + result= NDBT_FAILED; + + if(ndb_mgm_is_connected(h)) + { + ndbout << "FAILED: still connected" << endl; + result= NDBT_FAILED; + } + } + + ndbout << "TEST get_mgmd_nodeid" << endl; + ndb_mgm_connect(h,0,0,0); + + if(ndb_mgm_insert_error(h, mgmd_nodeid, 0, &reply)< 0) + { + ndbout << "failed to remove inserted error " << endl; + result= NDBT_FAILED; + goto done; + } + + cc= ndb_mgm_get_mgmd_nodeid(h); + ndbout << "got node id: " << cc << endl; + if(cc==0) + { + ndbout << "FAILED: didn't get node id" << endl; + result= NDBT_FAILED; + } + else + result= NDBT_OK; + + ndbout << "TEST end_session" << endl; + ndb_mgm_connect(h,0,0,0); + + if(ndb_mgm_insert_error(h, mgmd_nodeid, 4, &reply)< 0) + { + ndbout << "FAILED: insert error 1" << endl; + result= NDBT_FAILED; + goto done; + } + + cc= ndb_mgm_end_session(h); + if(cc==0) + { + ndbout << "FAILED: success in calling end_session" << endl; + result= NDBT_FAILED; + } + else if(ndb_mgm_get_latest_error(h)!=ETIMEDOUT) + { + ndbout << "FAILED: Incorrect error code (" << ndb_mgm_get_latest_error(h) + << " != expected " << ETIMEDOUT << ") desc: " + << ndb_mgm_get_latest_error_desc(h) + << " line: " << ndb_mgm_get_latest_error_line(h) + << " msg: " << ndb_mgm_get_latest_error_msg(h) + << endl; + result= NDBT_FAILED; + } + else + result= NDBT_OK; + + if(ndb_mgm_is_connected(h)) + { + ndbout << "FAILED: is still connected after error" << endl; + result= NDBT_FAILED; + } +done: + ndb_mgm_disconnect(h); + ndb_mgm_destroy_handle(&h); + + return result; +} + +int runTestApiGetStatusTimeout(NDBT_Context* ctx, NDBT_Step* step) +{ + char *mgm= ctx->getRemoteMgm(); + int result= NDBT_OK; + int cc= 0; + int mgmd_nodeid= 0; + + NdbMgmHandle h; + h= ndb_mgm_create_handle(); + ndb_mgm_set_connectstring(h, mgm); + + int errs[] = { 0, 5, 6, 7, 8, 9, -1 }; + + for(int error_ins_no=0; errs[error_ins_no]!=-1; error_ins_no++) + { + int error_ins= errs[error_ins_no]; + ndb_mgm_connect(h,0,0,0); + + if(ndb_mgm_check_connection(h) < 0) + { + result= NDBT_FAILED; + goto done; + } + + mgmd_nodeid= ndb_mgm_get_mgmd_nodeid(h); + if(mgmd_nodeid==0) + { + ndbout << "Failed to get mgmd node id to insert error" << endl; + result= NDBT_FAILED; + goto done; + } + + ndb_mgm_reply reply; + reply.return_code= 0; + + if(ndb_mgm_insert_error(h, mgmd_nodeid, error_ins, &reply)< 0) + { + ndbout << "failed to insert error " << error_ins << endl; + result= NDBT_FAILED; + } + + ndbout << "trying error: " << error_ins << endl; + + ndb_mgm_set_timeout(h,2500); + + struct ndb_mgm_cluster_state *cl= ndb_mgm_get_status(h); + + if(cl!=NULL) + free(cl); + + /* + * For whatever strange reason, + * get_status is okay with not having the last enter there. 
+ * instead of "fixing" the api, let's have a special case + * so we don't break any behaviour + */ + + if(error_ins!=0 && error_ins!=9 && cl!=NULL) + { + ndbout << "FAILED: got a ndb_mgm_cluster_state back" << endl; + result= NDBT_FAILED; + } + + if(error_ins!=0 && error_ins!=9 && ndb_mgm_is_connected(h)) + { + ndbout << "FAILED: is still connected after error" << endl; + result= NDBT_FAILED; + } + + if(error_ins!=0 && error_ins!=9 && ndb_mgm_get_latest_error(h)!=ETIMEDOUT) + { + ndbout << "FAILED: Incorrect error code (" << ndb_mgm_get_latest_error(h) + << " != expected " << ETIMEDOUT << ") desc: " + << ndb_mgm_get_latest_error_desc(h) + << " line: " << ndb_mgm_get_latest_error_line(h) + << " msg: " << ndb_mgm_get_latest_error_msg(h) + << endl; + result= NDBT_FAILED; + } + } + +done: + ndb_mgm_disconnect(h); + ndb_mgm_destroy_handle(&h); + + return result; +} + +int runTestMgmApiGetConfigTimeout(NDBT_Context* ctx, NDBT_Step* step) +{ + char *mgm= ctx->getRemoteMgm(); + int result= NDBT_OK; + int mgmd_nodeid= 0; + + NdbMgmHandle h; + h= ndb_mgm_create_handle(); + ndb_mgm_set_connectstring(h, mgm); + + int errs[] = { 0, 1, 2, 3, -1 }; + + for(int error_ins_no=0; errs[error_ins_no]!=-1; error_ins_no++) + { + int error_ins= errs[error_ins_no]; + ndb_mgm_connect(h,0,0,0); + + if(ndb_mgm_check_connection(h) < 0) + { + result= NDBT_FAILED; + goto done; + } + + mgmd_nodeid= ndb_mgm_get_mgmd_nodeid(h); + if(mgmd_nodeid==0) + { + ndbout << "Failed to get mgmd node id to insert error" << endl; + result= NDBT_FAILED; + goto done; + } + + ndb_mgm_reply reply; + reply.return_code= 0; + + if(ndb_mgm_insert_error(h, mgmd_nodeid, error_ins, &reply)< 0) + { + ndbout << "failed to insert error " << error_ins << endl; + result= NDBT_FAILED; + } + + ndbout << "trying error: " << error_ins << endl; + + ndb_mgm_set_timeout(h,2500); + + struct ndb_mgm_configuration *c= ndb_mgm_get_configuration(h,0); + + if(c!=NULL) + free(c); + + if(error_ins!=0 && c!=NULL) + { + ndbout << "FAILED: got a ndb_mgm_configuration back" << endl; + result= NDBT_FAILED; + } + + if(error_ins!=0 && ndb_mgm_is_connected(h)) + { + ndbout << "FAILED: is still connected after error" << endl; + result= NDBT_FAILED; + } + + if(error_ins!=0 && ndb_mgm_get_latest_error(h)!=ETIMEDOUT) + { + ndbout << "FAILED: Incorrect error code (" << ndb_mgm_get_latest_error(h) + << " != expected " << ETIMEDOUT << ") desc: " + << ndb_mgm_get_latest_error_desc(h) + << " line: " << ndb_mgm_get_latest_error_line(h) + << " msg: " << ndb_mgm_get_latest_error_msg(h) + << endl; + result= NDBT_FAILED; + } + } + +done: + ndb_mgm_disconnect(h); + ndb_mgm_destroy_handle(&h); + + return result; +} + +int runTestMgmApiEventTimeout(NDBT_Context* ctx, NDBT_Step* step) +{ + char *mgm= ctx->getRemoteMgm(); + int result= NDBT_OK; + int mgmd_nodeid= 0; + + NdbMgmHandle h; + h= ndb_mgm_create_handle(); + ndb_mgm_set_connectstring(h, mgm); + + int errs[] = { 10000, 0, -1 }; + + for(int error_ins_no=0; errs[error_ins_no]!=-1; error_ins_no++) + { + int error_ins= errs[error_ins_no]; + ndb_mgm_connect(h,0,0,0); + + if(ndb_mgm_check_connection(h) < 0) + { + result= NDBT_FAILED; + goto done; + } + + mgmd_nodeid= ndb_mgm_get_mgmd_nodeid(h); + if(mgmd_nodeid==0) + { + ndbout << "Failed to get mgmd node id to insert error" << endl; + result= NDBT_FAILED; + goto done; + } + + ndb_mgm_reply reply; + reply.return_code= 0; + + if(ndb_mgm_insert_error(h, mgmd_nodeid, error_ins, &reply)< 0) + { + ndbout << "failed to insert error " << error_ins << endl; + result= NDBT_FAILED; + } + + ndbout << 
"trying error: " << error_ins << endl; + + ndb_mgm_set_timeout(h,2500); + + int filter[] = { 15, NDB_MGM_EVENT_CATEGORY_BACKUP, + 1, NDB_MGM_EVENT_CATEGORY_STARTUP, + 0 }; + int fd= ndb_mgm_listen_event(h, filter); + + if(fd==NDB_INVALID_SOCKET) + { + ndbout << "FAILED: could not listen to event" << endl; + result= NDBT_FAILED; + } + + Uint32 theData[25]; + EventReport *fake_event = (EventReport*)theData; + fake_event->setEventType(NDB_LE_NDBStopForced); + fake_event->setNodeId(42); + theData[2]= 0; + theData[3]= 0; + theData[4]= 0; + theData[5]= 0; + + ndb_mgm_report_event(h, theData, 6); + + char *tmp= 0; + char buf[512]; + SocketInputStream in(fd,2000); + for(int i=0; i<20; i++) + { + if((tmp = in.gets(buf, sizeof(buf)))) + { +// const char ping_token[]="<PING>"; +// if(memcmp(ping_token,tmp,sizeof(ping_token)-1)) + if(tmp && strlen(tmp)) + ndbout << tmp; + } + else + { + if(in.timedout()) + { + ndbout << "TIMED OUT READING EVENT at iteration " << i << endl; + break; + } + } + } + + /* + * events go through a *DIFFERENT* socket than the NdbMgmHandle + * so we should still be connected (and be able to check_connection) + * + */ + + if(ndb_mgm_check_connection(h) && !ndb_mgm_is_connected(h)) + { + ndbout << "FAILED: is still connected after error" << endl; + result= NDBT_FAILED; + } + + ndb_mgm_disconnect(h); + } + +done: + ndb_mgm_disconnect(h); + ndb_mgm_destroy_handle(&h); + + return result; +} + +int runTestMgmApiStructEventTimeout(NDBT_Context* ctx, NDBT_Step* step) +{ + char *mgm= ctx->getRemoteMgm(); + int result= NDBT_OK; + int mgmd_nodeid= 0; + + NdbMgmHandle h; + h= ndb_mgm_create_handle(); + ndb_mgm_set_connectstring(h, mgm); + + int errs[] = { 10000, 0, -1 }; + + for(int error_ins_no=0; errs[error_ins_no]!=-1; error_ins_no++) + { + int error_ins= errs[error_ins_no]; + ndb_mgm_connect(h,0,0,0); + + if(ndb_mgm_check_connection(h) < 0) + { + result= NDBT_FAILED; + goto done; + } + + mgmd_nodeid= ndb_mgm_get_mgmd_nodeid(h); + if(mgmd_nodeid==0) + { + ndbout << "Failed to get mgmd node id to insert error" << endl; + result= NDBT_FAILED; + goto done; + } + + ndb_mgm_reply reply; + reply.return_code= 0; + + if(ndb_mgm_insert_error(h, mgmd_nodeid, error_ins, &reply)< 0) + { + ndbout << "failed to insert error " << error_ins << endl; + result= NDBT_FAILED; + } + + ndbout << "trying error: " << error_ins << endl; + + ndb_mgm_set_timeout(h,2500); + + int filter[] = { 15, NDB_MGM_EVENT_CATEGORY_BACKUP, + 1, NDB_MGM_EVENT_CATEGORY_STARTUP, + 0 }; + NdbLogEventHandle le_handle= ndb_mgm_create_logevent_handle(h, filter); + + struct ndb_logevent le; + for(int i=0; i<20; i++) + { + if(error_ins==0 || (error_ins!=0 && i<5)) + { + Uint32 theData[25]; + EventReport *fake_event = (EventReport*)theData; + fake_event->setEventType(NDB_LE_NDBStopForced); + fake_event->setNodeId(42); + theData[2]= 0; + theData[3]= 0; + theData[4]= 0; + theData[5]= 0; + + ndb_mgm_report_event(h, theData, 6); + } + int r= ndb_logevent_get_next(le_handle, &le, 2500); + if(r>0) + { + ndbout << "Receieved event" << endl; + } + else if(r<0) + { + ndbout << "ERROR" << endl; + } + else // no event + { + ndbout << "TIMED OUT READING EVENT at iteration " << i << endl; + if(error_ins==0) + result= NDBT_FAILED; + else + result= NDBT_OK; + break; + } + } + + /* + * events go through a *DIFFERENT* socket than the NdbMgmHandle + * so we should still be connected (and be able to check_connection) + * + */ + + if(ndb_mgm_check_connection(h) && !ndb_mgm_is_connected(h)) + { + ndbout << "FAILED: is still connected after error" << endl; 
+ result= NDBT_FAILED; + } + + ndb_mgm_disconnect(h); + } + +done: + ndb_mgm_disconnect(h); + ndb_mgm_destroy_handle(&h); + + return result; +} NDBT_TESTSUITE(testMgm); TESTCASE("SingleUserMode", @@ -219,6 +798,36 @@ TESTCASE("ApiSessionFailure", INITIALIZER(runTestApiSession); } +TESTCASE("ApiConnectTimeout", + "Connect timeout tests for MGMAPI"){ + INITIALIZER(runTestApiConnectTimeout); + +} +TESTCASE("ApiTimeoutBasic", + "Basic timeout tests for MGMAPI"){ + INITIALIZER(runTestApiTimeoutBasic); + +} +TESTCASE("ApiGetStatusTimeout", + "Test timeout for MGMAPI getStatus"){ + INITIALIZER(runTestApiGetStatusTimeout); + +} +TESTCASE("ApiGetConfigTimeout", + "Test timeouts for mgmapi get_configuration"){ + INITIALIZER(runTestMgmApiGetConfigTimeout); + +} +TESTCASE("ApiMgmEventTimeout", + "Test timeouts for mgmapi event listening"){ + INITIALIZER(runTestMgmApiEventTimeout); + +} +TESTCASE("ApiMgmStructEventTimeout", + "Test timeouts for mgmapi structured log events"){ + INITIALIZER(runTestMgmApiStructEventTimeout); + +} NDBT_TESTSUITE_END(testMgm); int main(int argc, const char** argv){ diff --git a/storage/ndb/test/ndbapi/testNdbApi.cpp b/storage/ndb/test/ndbapi/testNdbApi.cpp index 7375e711d16..c05a2417bca 100644 --- a/storage/ndb/test/ndbapi/testNdbApi.cpp +++ b/storage/ndb/test/ndbapi/testNdbApi.cpp @@ -1249,6 +1249,274 @@ int runScan_4006(NDBT_Context* ctx, NDBT_Step* step){ return result; } +char pkIdxName[255]; + +int createPkIndex(NDBT_Context* ctx, NDBT_Step* step){ + bool orderedIndex = ctx->getProperty("OrderedIndex", (unsigned)0); + + const NdbDictionary::Table* pTab = ctx->getTab(); + Ndb* pNdb = GETNDB(step); + + bool logged = ctx->getProperty("LoggedIndexes", 1); + + // Create index + BaseString::snprintf(pkIdxName, 255, "IDC_PK_%s", pTab->getName()); + if (orderedIndex) + ndbout << "Creating " << ((logged)?"logged ": "temporary ") << "ordered index " + << pkIdxName << " ("; + else + ndbout << "Creating " << ((logged)?"logged ": "temporary ") << "unique index " + << pkIdxName << " ("; + + NdbDictionary::Index pIdx(pkIdxName); + pIdx.setTable(pTab->getName()); + if (orderedIndex) + pIdx.setType(NdbDictionary::Index::OrderedIndex); + else + pIdx.setType(NdbDictionary::Index::UniqueHashIndex); + for (int c = 0; c< pTab->getNoOfColumns(); c++){ + const NdbDictionary::Column * col = pTab->getColumn(c); + if(col->getPrimaryKey()){ + pIdx.addIndexColumn(col->getName()); + ndbout << col->getName() <<" "; + } + } + + pIdx.setStoredIndex(logged); + ndbout << ") "; + if (pNdb->getDictionary()->createIndex(pIdx) != 0){ + ndbout << "FAILED!" << endl; + const NdbError err = pNdb->getDictionary()->getNdbError(); + ERR(err); + return NDBT_FAILED; + } + + ndbout << "OK!" << endl; + return NDBT_OK; +} + +int createPkIndex_Drop(NDBT_Context* ctx, NDBT_Step* step){ + const NdbDictionary::Table* pTab = ctx->getTab(); + Ndb* pNdb = GETNDB(step); + + // Drop index + ndbout << "Dropping index " << pkIdxName << " "; + if (pNdb->getDictionary()->dropIndex(pkIdxName, + pTab->getName()) != 0){ + ndbout << "FAILED!" << endl; + ERR(pNdb->getDictionary()->getNdbError()); + return NDBT_FAILED; + } else { + ndbout << "OK!" 
<< endl; + } + + return NDBT_OK; +} + +static +int +op_row(NdbTransaction* pTrans, HugoOperations& hugoOps, + const NdbDictionary::Table* pTab, int op, int row) +{ + NdbOperation * pOp = 0; + switch(op){ + case 0: + case 1: + case 2: + case 3: + case 4: + case 5: + pOp = pTrans->getNdbOperation(pTab->getName()); + break; + case 9: + return 0; + case 6: + case 7: + case 8: + case 10: + case 11: + pOp = pTrans->getNdbIndexOperation(pkIdxName, pTab->getName()); + default: + break; + } + + switch(op){ + case 0: + case 6: + pOp->readTuple(); + break; + case 1: + case 7: + pOp->committedRead(); + break; + case 2: + case 8: + pOp->readTupleExclusive(); + break; + case 3: + case 9: + pOp->insertTuple(); + break; + case 4: + case 10: + pOp->updateTuple(); + break; + case 5: + case 11: + pOp->deleteTuple(); + break; + default: + abort(); + } + + for(int a = 0; a<pTab->getNoOfColumns(); a++){ + if (pTab->getColumn(a)->getPrimaryKey() == true){ + if(hugoOps.equalForAttr(pOp, a, row) != 0){ + return NDBT_FAILED; + } + } + } + + switch(op){ + case 0: + case 1: + case 2: + case 6: + case 7: + case 8: + for(int a = 0; a<pTab->getNoOfColumns(); a++){ + pOp->getValue(a); + } + break; + case 3: + case 4: + case 10: + for(int a = 0; a<pTab->getNoOfColumns(); a++){ + if (pTab->getColumn(a)->getPrimaryKey() == false){ + if(hugoOps.setValueForAttr(pOp, a, row, 2) != 0){ + return NDBT_FAILED; + } + } + } + break; + case 5: + case 11: + pOp->deleteTuple(); + break; + case 9: + default: + abort(); + } + + return NDBT_OK; +} + +static void print(int op) +{ + const char * str = 0; + switch(op){ + case 0: str = "pk read-sh"; break; + case 1: str = "pk read-nl"; break; + case 2: str = "pk read-ex"; break; + case 3: str = "pk insert "; break; + case 4: str = "pk update "; break; + case 5: str = "pk delete "; break; + case 6: str = "uk read-sh"; break; + case 7: str = "uk read-nl"; break; + case 8: str = "uk read-ex"; break; + case 9: str = "noop "; break; + case 10: str = "uk update "; break; + case 11: str = "uk delete "; break; + default: + abort(); + } + printf("%s ", str); +} + +int +runTestIgnoreError(NDBT_Context* ctx, NDBT_Step* step) +{ + int result = NDBT_OK; + Uint32 loops = ctx->getNumRecords(); + const NdbDictionary::Table* pTab = ctx->getTab(); + + HugoOperations hugoOps(*pTab); + HugoTransactions hugoTrans(*pTab); + + Ndb* pNdb = GETNDB(step); + + struct { + ExecType et; + AbortOption ao; + } tests[] = { + { Commit, AbortOnError }, + { Commit, AO_IgnoreError }, + { NoCommit, AbortOnError }, + { NoCommit, AO_IgnoreError }, + }; + + printf("case: <op1> <op2> c/nc ao/ie\n"); + Uint32 tno = 0; + for (Uint32 op1 = 0; op1 < 12; op1++) + { + for (Uint32 op2 = op1; op2 < 12; op2++) + { + int ret; + NdbTransaction* pTrans = 0; + + for (Uint32 i = 0; i<4; i++, tno++) + { + if (loops != 1000 && loops != tno) + continue; + ExecType et = tests[i].et; + AbortOption ao = tests[i].ao; + + printf("%.3d : ", tno); + print(op1); + print(op2); + switch(et){ + case Commit: printf("c "); break; + case NoCommit: printf("nc "); break; + } + switch(ao){ + case AbortOnError: printf("aoe "); break; + case AO_IgnoreError: printf("ie "); break; + } + printf(": "); + + + hugoTrans.loadTable(pNdb, 1); + pTrans = pNdb->startTransaction(); + op_row(pTrans, hugoOps, pTab, op1, 0); + ret = pTrans->execute(et, ao); + pTrans->close(); + printf("%d ", ret); + hugoTrans.clearTable(pNdb); + + hugoTrans.loadTable(pNdb, 1); + pTrans = pNdb->startTransaction(); + op_row(pTrans, hugoOps, pTab, op1, 1); + ret = pTrans->execute(et, ao); + 
pTrans->close(); + printf("%d ", ret); + hugoTrans.clearTable(pNdb); + + hugoTrans.loadTable(pNdb, 1); + pTrans = pNdb->startTransaction(); + op_row(pTrans, hugoOps, pTab, op1, 0); + op_row(pTrans, hugoOps, pTab, op2, 1); + ret = pTrans->execute(et, ao); + pTrans->close(); + printf("%d\n", ret); + hugoTrans.clearTable(pNdb); + + hugoTrans.clearTable(pNdb); + } + } + } + return NDBT_OK; +} + static void testExecuteAsynchCallback(int res, NdbTransaction *con, void *data_ptr) { @@ -1318,7 +1586,6 @@ int runTestExecuteAsynch(NDBT_Context* ctx, NDBT_Step* step){ return result; } - template class Vector<NdbScanOperation*>; int @@ -1442,9 +1709,11 @@ TESTCASE("Scan_4006", INITIALIZER(runScan_4006); FINALIZER(runClearTable); } -TESTCASE("Bug28443", - ""){ - INITIALIZER(runBug28443); +TESTCASE("IgnoreError", ""){ + INITIALIZER(createPkIndex); + STEP(runTestIgnoreError); + FINALIZER(runClearTable); + FINALIZER(createPkIndex_Drop); } TESTCASE("ExecuteAsynch", "Check that executeAsync() works (BUG#27495)\n"){ diff --git a/storage/ndb/test/ndbapi/testNodeRestart.cpp b/storage/ndb/test/ndbapi/testNodeRestart.cpp index 4b80dd7aaef..419196e00eb 100644 --- a/storage/ndb/test/ndbapi/testNodeRestart.cpp +++ b/storage/ndb/test/ndbapi/testNodeRestart.cpp @@ -1236,6 +1236,100 @@ int runBug25554(NDBT_Context* ctx, NDBT_Step* step){ return NDBT_OK; } +int runBug25984(NDBT_Context* ctx, NDBT_Step* step){ + + int result = NDBT_OK; + int loops = ctx->getNumLoops(); + int records = ctx->getNumRecords(); + NdbRestarter restarter; + + if (restarter.getNumDbNodes() < 2) + return NDBT_OK; + + if (restarter.restartAll(true, true, true)) + return NDBT_FAILED; + + if (restarter.waitClusterNoStart()) + return NDBT_FAILED; + + if (restarter.startAll()) + return NDBT_FAILED; + + if (restarter.waitClusterStarted()) + return NDBT_FAILED; + + int val2[] = { DumpStateOrd::CmvmiSetRestartOnErrorInsert, 1 }; + int master = restarter.getMasterNodeId(); + int victim = restarter.getRandomNodeOtherNodeGroup(master, rand()); + if (victim == -1) + victim = restarter.getRandomNodeSameNodeGroup(master, rand()); + + restarter.restartOneDbNode(victim, false, true, true); + + for (Uint32 i = 0; i<6; i++) + { + ndbout_c("Loop: %d", i); + if (restarter.waitNodesNoStart(&victim, 1)) + return NDBT_FAILED; + + if (restarter.dumpStateOneNode(victim, val2, 2)) + return NDBT_FAILED; + + if (restarter.insertErrorInNode(victim, 7016)) + return NDBT_FAILED; + + if (restarter.startNodes(&victim, 1)) + return NDBT_FAILED; + + NdbSleep_SecSleep(3); + } + + if (restarter.waitNodesNoStart(&victim, 1)) + return NDBT_FAILED; + + if (restarter.dumpStateOneNode(victim, val2, 2)) + return NDBT_FAILED; + + if (restarter.insertErrorInNode(victim, 7170)) + return NDBT_FAILED; + + if (restarter.startNodes(&victim, 1)) + return NDBT_FAILED; + + if (restarter.waitNodesNoStart(&victim, 1)) + return NDBT_FAILED; + + if (restarter.restartAll(false, true, true)) + return NDBT_FAILED; + + if (restarter.insertErrorInAllNodes(932)) + return NDBT_FAILED; + + if (restarter.insertErrorInNode(master, 7170)) + return NDBT_FAILED; + + if (restarter.dumpStateAllNodes(val2, 2)) + return NDBT_FAILED; + + restarter.startNodes(&master, 1); + NdbSleep_MilliSleep(3000); + restarter.startAll(); + + if (restarter.waitClusterNoStart()) + return NDBT_FAILED; + + if (restarter.restartOneDbNode(victim, true, true, true)) + return NDBT_FAILED; + + if (restarter.startAll()) + return NDBT_FAILED; + + if (restarter.waitClusterStarted()) + return NDBT_FAILED; + + return NDBT_OK; +} + int 
runBug26457(NDBT_Context* ctx, NDBT_Step* step) { @@ -1272,7 +1366,7 @@ retry: if (res.waitClusterStarted()) return NDBT_FAILED; } - + return NDBT_OK; } @@ -2080,6 +2174,9 @@ TESTCASE("Bug25468", ""){ TESTCASE("Bug25554", ""){ INITIALIZER(runBug25554); } +TESTCASE("Bug25984", ""){ + INITIALIZER(runBug25984); +} TESTCASE("Bug26457", ""){ INITIALIZER(runBug26457); } @@ -2102,9 +2199,6 @@ TESTCASE("Bug27466", ""){ TESTCASE("Bug28023", ""){ INITIALIZER(runBug28023); } -TESTCASE("Bug25554", ""){ - INITIALIZER(runBug25554); -} TESTCASE("Bug28717", ""){ INITIALIZER(runBug28717); } diff --git a/storage/ndb/test/ndbapi/testOperations.cpp b/storage/ndb/test/ndbapi/testOperations.cpp index 38d6e817637..95be2b988e1 100644 --- a/storage/ndb/test/ndbapi/testOperations.cpp +++ b/storage/ndb/test/ndbapi/testOperations.cpp @@ -99,11 +99,6 @@ OperationTestCase matrix[] = { break; } #define C3(b) if (!(b)) { \ - g_err << "ERR: "<< step->getName() \ - << " failed on line " << __LINE__ << endl; \ - abort(); return NDBT_FAILED; } - -#define C3(b) if (!(b)) { \ g_err << "ERR: failed on line " << __LINE__ << endl; \ return NDBT_FAILED; } diff --git a/storage/ndb/test/ndbapi/testSRBank.cpp b/storage/ndb/test/ndbapi/testSRBank.cpp index dcadf045fff..d0dbadbda75 100644 --- a/storage/ndb/test/ndbapi/testSRBank.cpp +++ b/storage/ndb/test/ndbapi/testSRBank.cpp @@ -20,9 +20,16 @@ #include <NdbBackup.hpp> #include "bank/Bank.hpp" +#include <NdbMixRestarter.hpp> bool disk = false; +#define CHECK(b) if (!(b)) { \ + g_err << "ERR: "<< step->getName() \ + << " failed on line " << __LINE__ << endl; \ + result = NDBT_FAILED; \ + continue; } + int runCreateBank(NDBT_Context* ctx, NDBT_Step* step){ Bank bank(ctx->m_cluster_connection); int overWriteExisting = true; @@ -37,21 +44,25 @@ int runCreateBank(NDBT_Context* ctx, NDBT_Step* step){ * SR 1 - shutdown in progress * SR 2 - restart in progress */ -int runBankTimer(NDBT_Context* ctx, NDBT_Step* step){ +int +runBankTimer(NDBT_Context* ctx, NDBT_Step* step){ int wait = 5; // Max seconds between each "day" int yield = 1; // Loops before bank returns - - ctx->incProperty("ThreadCount"); + + ctx->incProperty(NMR_SR_THREADS); while (!ctx->isTestStopped()) { Bank bank(ctx->m_cluster_connection); - while(!ctx->isTestStopped() && ctx->getProperty("SR") <= 1) + while(!ctx->isTestStopped() && + ctx->getProperty(NMR_SR) <= NdbMixRestarter::SR_STOPPING) + { if(bank.performIncreaseTime(wait, yield) == NDBT_FAILED) break; - + } + ndbout_c("runBankTimer is stopped"); - ctx->incProperty("ThreadStopped"); - if(ctx->getPropertyWait("SR", (Uint32)0)) + ctx->incProperty(NMR_SR_THREADS_STOPPED); + if(ctx->getPropertyWait(NMR_SR, NdbMixRestarter::SR_RUNNING)) break; } return NDBT_OK; @@ -61,17 +72,18 @@ int runBankTransactions(NDBT_Context* ctx, NDBT_Step* step){ int wait = 0; // Max ms between each transaction int yield = 1; // Loops before bank returns - ctx->incProperty("ThreadCount"); + ctx->incProperty(NMR_SR_THREADS); while (!ctx->isTestStopped()) { Bank bank(ctx->m_cluster_connection); - while(!ctx->isTestStopped() && ctx->getProperty("SR") <= 1) + while(!ctx->isTestStopped() && + ctx->getProperty(NMR_SR) <= NdbMixRestarter::SR_STOPPING) if(bank.performTransactions(0, 1) == NDBT_FAILED) break; ndbout_c("runBankTransactions is stopped"); - ctx->incProperty("ThreadStopped"); - if(ctx->getPropertyWait("SR", (Uint32)0)) + ctx->incProperty(NMR_SR_THREADS_STOPPED); + if(ctx->getPropertyWait(NMR_SR, NdbMixRestarter::SR_RUNNING)) break; } return NDBT_OK; @@ -80,278 +92,108 @@ int 
runBankTransactions(NDBT_Context* ctx, NDBT_Step* step){ int runBankGL(NDBT_Context* ctx, NDBT_Step* step){ int yield = 1; // Loops before bank returns int result = NDBT_OK; - - ctx->incProperty("ThreadCount"); + + ctx->incProperty(NMR_SR_THREADS); while (ctx->isTestStopped() == false) { Bank bank(ctx->m_cluster_connection); - while(!ctx->isTestStopped() && ctx->getProperty("SR") <= 1) + while(!ctx->isTestStopped() && + ctx->getProperty(NMR_SR) <= NdbMixRestarter::SR_STOPPING) if (bank.performMakeGLs(yield) != NDBT_OK) { - if(ctx->getProperty("SR") != 0) + if(ctx->getProperty(NMR_SR) != NdbMixRestarter::SR_RUNNING) break; ndbout << "bank.performMakeGLs FAILED" << endl; + abort(); return NDBT_FAILED; } ndbout_c("runBankGL is stopped"); - ctx->incProperty("ThreadStopped"); - if(ctx->getPropertyWait("SR", (Uint32)0)) + ctx->incProperty(NMR_SR_THREADS_STOPPED); + if(ctx->getPropertyWait(NMR_SR, NdbMixRestarter::SR_RUNNING)) break; } return NDBT_OK; } -int runBankSum(NDBT_Context* ctx, NDBT_Step* step){ - Bank bank(ctx->m_cluster_connection); - int wait = 2000; // Max ms between each sum of accounts - int yield = 1; // Loops before bank returns - int result = NDBT_OK; - - while (ctx->isTestStopped() == false) { - if (bank.performSumAccounts(wait, yield) != NDBT_OK){ - ndbout << "bank.performSumAccounts FAILED" << endl; - result = NDBT_FAILED; - } - } - return result ; -} - -#define CHECK(b) if (!(b)) { \ - g_err << "ERR: "<< step->getName() \ - << " failed on line " << __LINE__ << endl; \ - result = NDBT_FAILED; \ - continue; } - -static -int -restart_cluster(NDBT_Context* ctx, NDBT_Step* step, NdbRestarter& restarter) +int +runBankSrValidator(NDBT_Context* ctx, NDBT_Step* step) { - bool abort = true; - int timeout = 180; - int result = NDBT_OK; - do + ctx->incProperty(NMR_SR_VALIDATE_THREADS); + + while(!ctx->isTestStopped()) { - ndbout << " -- Shutting down " << endl; - ctx->setProperty("SR", 1); - CHECK(restarter.restartAll(false, true, abort) == 0); - ctx->setProperty("SR", 2); - CHECK(restarter.waitClusterNoStart(timeout) == 0); + if (ctx->getPropertyWait(NMR_SR, NdbMixRestarter::SR_VALIDATING)) + break; - Uint32 cnt = ctx->getProperty("ThreadCount"); - Uint32 curr= ctx->getProperty("ThreadStopped"); - while(curr != cnt && !ctx->isTestStopped()) + int wait = 0; + int yield = 1; + Bank bank(ctx->m_cluster_connection); + if (bank.performSumAccounts(wait, yield) != 0) { - ndbout_c("%d %d", curr, cnt); - NdbSleep_MilliSleep(100); - curr= ctx->getProperty("ThreadStopped"); + ndbout << "bank.performSumAccounts FAILED" << endl; + abort(); + return NDBT_FAILED; } - ctx->setProperty("ThreadStopped", (Uint32)0); - CHECK(restarter.startAll() == 0); - CHECK(restarter.waitClusterStarted(timeout) == 0); - - ndbout << " -- Validating starts " << endl; + if (bank.performValidateAllGLs() != 0) { - int wait = 0; - int yield = 1; - Bank bank(ctx->m_cluster_connection); - if (bank.performSumAccounts(wait, yield) != 0) - { - ndbout << "bank.performSumAccounts FAILED" << endl; - return NDBT_FAILED; - } - - if (bank.performValidateAllGLs() != 0) - { - ndbout << "bank.performValidateAllGLs FAILED" << endl; - return NDBT_FAILED; - } + ndbout << "bank.performValidateAllGLs FAILED" << endl; + abort(); + return NDBT_FAILED; } - ndbout << " -- Validating complete " << endl; - } while(0); - ctx->setProperty("SR", (Uint32)0); - ctx->broadcast(); - return result; -} - -static -ndb_mgm_node_state* -select_node_to_stop(Vector<ndb_mgm_node_state>& nodes) -{ - Uint32 i, j; - Vector<ndb_mgm_node_state*> alive_nodes; - for(i = 
0; i<nodes.size(); i++) - { - ndb_mgm_node_state* node = &nodes[i]; - if (node->node_status == NDB_MGM_NODE_STATUS_STARTED) - alive_nodes.push_back(node); - } - - Vector<ndb_mgm_node_state*> victims; - // Remove those with one in node group - for(i = 0; i<alive_nodes.size(); i++) - { - int group = alive_nodes[i]->node_group; - for(j = 0; j<alive_nodes.size(); j++) - { - if (i != j && alive_nodes[j]->node_group == group) - { - victims.push_back(alive_nodes[i]); - break; - } - } - } - - if (victims.size()) - { - int victim = rand() % victims.size(); - return victims[victim]; - } - else - { - return 0; + ctx->incProperty(NMR_SR_VALIDATE_THREADS_DONE); + + if (ctx->getPropertyWait(NMR_SR, NdbMixRestarter::SR_RUNNING)) + break; } + + return NDBT_OK; } -static -ndb_mgm_node_state* -select_node_to_start(Vector<ndb_mgm_node_state>& nodes) -{ - Uint32 i, j; - Vector<ndb_mgm_node_state*> victims; - for(i = 0; i<nodes.size(); i++) - { - ndb_mgm_node_state* node = &nodes[i]; - if (node->node_status == NDB_MGM_NODE_STATUS_NOT_STARTED) - victims.push_back(node); - } +#if 0 +int runBankSum(NDBT_Context* ctx, NDBT_Step* step){ + Bank bank(ctx->m_cluster_connection); + int wait = 2000; // Max ms between each sum of accounts + int yield = 1; // Loops before bank returns + int result = NDBT_OK; - if (victims.size()) - { - int victim = rand() % victims.size(); - return victims[victim]; - } - else + while (ctx->isTestStopped() == false) { - return 0; + if (bank.performSumAccounts(wait, yield) != NDBT_OK){ + ndbout << "bank.performSumAccounts FAILED" << endl; + result = NDBT_FAILED; + } } + return result ; } +#endif -enum Action { - AA_RestartCluster = 0x1, - AA_RestartNode = 0x2, - AA_StopNode = 0x4, - AA_StartNode = 0x8, - AA_COUNT = 4 -}; int runMixRestart(NDBT_Context* ctx, NDBT_Step* step) { int result = NDBT_OK; + NdbMixRestarter res; int runtime = ctx->getNumLoops(); int sleeptime = ctx->getNumRecords(); - NdbRestarter restarter; - int timeout = 180; - Uint32 type = ctx->getProperty("Type", ~(Uint32)0); - - restarter.waitClusterStarted(); - Vector<ndb_mgm_node_state> nodes; - nodes = restarter.ndbNodes; -#if 0 - for (Uint32 i = 0; i<restarter.ndbNodes.size(); i++) - nodes.push_back(restarter.ndbNodes[i]); -#endif + Uint32 mask = ctx->getProperty("Type", ~(Uint32)0); + res.setRestartTypeMask(mask); - - Uint32 now; - const Uint32 stop = time(0)+ runtime; - while(!ctx->isTestStopped() && ((now= time(0)) < stop) && result == NDBT_OK) + if (res.runPeriod(ctx, step, runtime, sleeptime)) { - ndbout << " -- Sleep " << sleeptime << "s " << endl; - int cnt = sleeptime; - while (cnt-- && !ctx->isTestStopped()) - NdbSleep_SecSleep(1); - if (ctx->isTestStopped()) - return NDBT_FAILED; - - ndb_mgm_node_state* node = 0; - int action; -loop: - while(((action = (1 << (rand() % AA_COUNT))) & type) == 0); - switch(action){ - case AA_RestartCluster: - if (restart_cluster(ctx, step, restarter)) - return NDBT_FAILED; - for (Uint32 i = 0; i<nodes.size(); i++) - nodes[i].node_status = NDB_MGM_NODE_STATUS_STARTED; - break; - case AA_RestartNode: - case AA_StopNode: - { - if ((node = select_node_to_stop(nodes)) == 0) - goto loop; - - if (action == AA_RestartNode) - g_err << "Restarting " << node->node_id << endl; - else - g_err << "Stopping " << node->node_id << endl; - - if (restarter.restartOneDbNode(node->node_id, false, true, true)) - return NDBT_FAILED; - - if (restarter.waitNodesNoStart(&node->node_id, 1)) - return NDBT_FAILED; - - node->node_status = NDB_MGM_NODE_STATUS_NOT_STARTED; - - if (action == AA_StopNode) - break; 
- else - goto start; - } - case AA_StartNode: - if ((node = select_node_to_start(nodes)) == 0) - goto loop; - start: - g_err << "Starting " << node->node_id << endl; - if (restarter.startNodes(&node->node_id, 1)) - return NDBT_FAILED; - if (restarter.waitNodesStarted(&node->node_id, 1)) - return NDBT_FAILED; - - node->node_status = NDB_MGM_NODE_STATUS_STARTED; - break; - } - } - - Vector<int> not_started; - { - ndb_mgm_node_state* node = 0; - while((node = select_node_to_start(nodes))) - { - not_started.push_back(node->node_id); - node->node_status = NDB_MGM_NODE_STATUS_STARTED; - } - } - - if (not_started.size()) - { - g_err << "Starting stopped nodes " << endl; - if (restarter.startNodes(not_started.getBase(), not_started.size())) - return NDBT_FAILED; - if (restarter.waitClusterStarted()) - return NDBT_FAILED; + abort(); + return NDBT_FAILED; } ctx->stopTest(); return NDBT_OK; } -int runDropBank(NDBT_Context* ctx, NDBT_Step* step){ +int +runDropBank(NDBT_Context* ctx, NDBT_Step* step){ Bank bank(ctx->m_cluster_connection); if (bank.dropBank() != NDBT_OK) return NDBT_FAILED; @@ -367,7 +209,7 @@ TESTCASE("SR", "3. Restart ndb and verify consistency\n" "4. Drop bank\n") { - TC_PROPERTY("Type", AA_RestartCluster); + TC_PROPERTY("Type", NdbMixRestarter::RTM_SR); INITIALIZER(runCreateBank); STEP(runBankTimer); STEP(runBankTransactions); @@ -381,6 +223,7 @@ TESTCASE("SR", STEP(runBankTransactions); STEP(runBankTransactions); STEP(runBankGL); + STEP(runBankSrValidator); STEP(runMixRestart); } TESTCASE("NR", @@ -390,7 +233,7 @@ TESTCASE("NR", "3. Restart ndb and verify consistency\n" "4. Drop bank\n") { - TC_PROPERTY("Type", AA_RestartNode | AA_StopNode | AA_StartNode); + TC_PROPERTY("Type", NdbMixRestarter::RTM_NR); INITIALIZER(runCreateBank); STEP(runBankTimer); STEP(runBankTransactions); @@ -414,7 +257,7 @@ TESTCASE("Mix", "3. Restart ndb and verify consistency\n" "4. Drop bank\n") { - TC_PROPERTY("Type", ~0); + TC_PROPERTY("Type", NdbMixRestarter::RTM_ALL); INITIALIZER(runCreateBank); STEP(runBankTimer); STEP(runBankTransactions); @@ -429,6 +272,7 @@ TESTCASE("Mix", STEP(runBankTransactions); STEP(runBankGL); STEP(runMixRestart); + STEP(runBankSrValidator); FINALIZER(runDropBank); } NDBT_TESTSUITE_END(testSRBank); diff --git a/storage/ndb/test/ndbapi/testScanFilter.cpp b/storage/ndb/test/ndbapi/testScanFilter.cpp new file mode 100644 index 00000000000..dfe1097bd25 --- /dev/null +++ b/storage/ndb/test/ndbapi/testScanFilter.cpp @@ -0,0 +1,860 @@ +/* Copyright (C) 2007 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#include <NDBT.hpp> +#include <NDBT_Test.hpp> + +#define ERR_EXIT(obj, msg) \ +do \ +{ \ +fprintf(stderr, "%s: %s (%d) in %s:%d\n", \ +msg, obj->getNdbError().message, obj->getNdbError().code, __FILE__, __LINE__); \ +exit(-1); \ +} \ +while (0); + +#define PRINT_ERROR(code,msg) \ +do \ +{ \ +fprintf(stderr, "Error in %s, line: %d, code: %d, msg: %s.\n", __FILE__, __LINE__, code, msg); \ +} \ +while (0); + +#define MYSQLERROR(mysql) { \ + PRINT_ERROR(mysql_errno(&mysql),mysql_error(&mysql)); \ + exit(-1); } +#define APIERROR(error) { \ + PRINT_ERROR(error.code,error.message); \ + exit(-1); } + +#define TEST_NAME "TestScanFilter" +#define TABLE_NAME "TABLE_SCAN" + +const char *COL_NAME[] = {"id", "i", "j", "k", "l", "m", "n"}; +const char COL_LEN = 7; +/* +* Do not change TUPLE_NUM: the columns in TABLE_NAME are fixed. +* There are six columns, 'i', 'j', 'k', 'l', 'm', 'n', and each one is equal to 0 or 1. +* Since each tuple should be unique in this case, TUPLE_NUM = 2 power 6 = 64 +*/ +#ifdef _AIX +/* + IBM xlC_r breaks on the initialization with pow(): + "The expression must be an integral constant expression." +*/ +const int TUPLE_NUM = 64; +#else +const int TUPLE_NUM = (int)pow(2, COL_LEN-1); +#endif + +/* +* the recursion level of the random scan filter; this +* parameter can be tuned in the range 1 to 100, and a +* larger value consumes more scan time +*/ +const int RECURSIVE_LEVEL = 10; + +const int MAX_STR_LEN = (RECURSIVE_LEVEL * (COL_LEN+1) * 4); + +/* +* Each iteration is one test: it produces a random +* filter string, scans through the ndb api and through +* calculation on the tuples' data, then compares the results; +* if they are equal, the test passes, otherwise it fails. +* Only if all TEST_NUM tests pass can we believe +* the suite of test cases is okay. +* Increasing TEST_NUM makes the test take more time +*/ +const int TEST_NUM = 5000; + + +/* Table definition */ +static +const +NDBT_Attribute MYTAB1Attribs[] = { + NDBT_Attribute("id", NdbDictionary::Column::Unsigned, 1, true), + NDBT_Attribute("i", NdbDictionary::Column::Unsigned), + NDBT_Attribute("j", NdbDictionary::Column::Unsigned), + NDBT_Attribute("k", NdbDictionary::Column::Unsigned), + NDBT_Attribute("l", NdbDictionary::Column::Unsigned), + NDBT_Attribute("m", NdbDictionary::Column::Unsigned), + NDBT_Attribute("n", NdbDictionary::Column::Unsigned), +}; +static +const +NDBT_Table MYTAB1(TABLE_NAME, sizeof(MYTAB1Attribs)/sizeof(NDBT_Attribute), MYTAB1Attribs); + + +int createTable(Ndb* pNdb, const NdbDictionary::Table* tab, bool _temp, + bool existsOk, NDBT_CreateTableHook f) +{ + int r = 0; + do{ + NdbDictionary::Table tmpTab(* tab); + tmpTab.setStoredTable(_temp ? 
0 : 1); + if(f != 0 && f(pNdb, tmpTab, 0, NULL)) + { + ndbout << "Failed to create table" << endl; + return NDBT_FAILED; + } + r = pNdb->getDictionary()->createTable(tmpTab); + if(r == -1){ + if(!existsOk){ + ndbout << "Error: " << pNdb->getDictionary()->getNdbError() << endl; + break; + } + if(pNdb->getDictionary()->getNdbError().code != 721){ + ndbout << "Error: " << pNdb->getDictionary()->getNdbError() << endl; + break; + } + r = 0; + } + }while(false); + + return r; +} + +/* +* Function to produce the tuples' data +*/ +int runPopulate(NDBT_Context* ctx, NDBT_Step* step) +{ + Ndb *myNdb = GETNDB(step); + const NdbDictionary::Dictionary* myDict= myNdb->getDictionary(); + const NdbDictionary::Table *myTable= myDict->getTable(TABLE_NAME); + if(myTable == NULL) + APIERROR(myDict->getNdbError()); + + NdbTransaction* myTrans = myNdb->startTransaction(); + if (myTrans == NULL) + APIERROR(myNdb->getNdbError()); + + for(int num = 0; num < TUPLE_NUM; num++) + { + NdbOperation* myNdbOperation = myTrans->getNdbOperation(myTable); + if(myNdbOperation == NULL) + { + APIERROR(myTrans->getNdbError()); + } + +/* the tuples' data in TABLE_NAME ++----+---+---+---+---+---+---+ +| id | i | j | k | l | m | n | ++----+---+---+---+---+---+---+ +| 0 | 0 | 0 | 0 | 0 | 0 | 0 | +| 1 | 0 | 0 | 0 | 0 | 0 | 1 | +| 2 | 0 | 0 | 0 | 0 | 1 | 0 | +| 3 | 0 | 0 | 0 | 0 | 1 | 1 | +| 4 | 0 | 0 | 0 | 1 | 0 | 0 | +| 5 | 0 | 0 | 0 | 1 | 0 | 1 | +| 6 | 0 | 0 | 0 | 1 | 1 | 0 | +| 7 | 0 | 0 | 0 | 1 | 1 | 1 | +| 8 | 0 | 0 | 1 | 0 | 0 | 0 | +| 9 | 0 | 0 | 1 | 0 | 0 | 1 | +| 10 | 0 | 0 | 1 | 0 | 1 | 0 | +| 11 | 0 | 0 | 1 | 0 | 1 | 1 | +| 12 | 0 | 0 | 1 | 1 | 0 | 0 | +| 13 | 0 | 0 | 1 | 1 | 0 | 1 | +| 14 | 0 | 0 | 1 | 1 | 1 | 0 | +| 15 | 0 | 0 | 1 | 1 | 1 | 1 | +| 16 | 0 | 1 | 0 | 0 | 0 | 0 | +| 17 | 0 | 1 | 0 | 0 | 0 | 1 | +| 18 | 0 | 1 | 0 | 0 | 1 | 0 | +| 19 | 0 | 1 | 0 | 0 | 1 | 1 | +| 20 | 0 | 1 | 0 | 1 | 0 | 0 | +| 21 | 0 | 1 | 0 | 1 | 0 | 1 | +| 22 | 0 | 1 | 0 | 1 | 1 | 0 | +| 23 | 0 | 1 | 0 | 1 | 1 | 1 | +| 24 | 0 | 1 | 1 | 0 | 0 | 0 | +| 25 | 0 | 1 | 1 | 0 | 0 | 1 | +| 26 | 0 | 1 | 1 | 0 | 1 | 0 | +| 27 | 0 | 1 | 1 | 0 | 1 | 1 | +| 28 | 0 | 1 | 1 | 1 | 0 | 0 | +| 29 | 0 | 1 | 1 | 1 | 0 | 1 | +| 30 | 0 | 1 | 1 | 1 | 1 | 0 | +| 31 | 0 | 1 | 1 | 1 | 1 | 1 | +| 32 | 1 | 0 | 0 | 0 | 0 | 0 | +| 33 | 1 | 0 | 0 | 0 | 0 | 1 | +| 34 | 1 | 0 | 0 | 0 | 1 | 0 | +| 35 | 1 | 0 | 0 | 0 | 1 | 1 | +| 36 | 1 | 0 | 0 | 1 | 0 | 0 | +| 37 | 1 | 0 | 0 | 1 | 0 | 1 | +| 38 | 1 | 0 | 0 | 1 | 1 | 0 | +| 39 | 1 | 0 | 0 | 1 | 1 | 1 | +| 40 | 1 | 0 | 1 | 0 | 0 | 0 | +| 41 | 1 | 0 | 1 | 0 | 0 | 1 | +| 42 | 1 | 0 | 1 | 0 | 1 | 0 | +| 43 | 1 | 0 | 1 | 0 | 1 | 1 | +| 44 | 1 | 0 | 1 | 1 | 0 | 0 | +| 45 | 1 | 0 | 1 | 1 | 0 | 1 | +| 46 | 1 | 0 | 1 | 1 | 1 | 0 | +| 47 | 1 | 0 | 1 | 1 | 1 | 1 | +| 48 | 1 | 1 | 0 | 0 | 0 | 0 | +| 49 | 1 | 1 | 0 | 0 | 0 | 1 | +| 50 | 1 | 1 | 0 | 0 | 1 | 0 | +| 51 | 1 | 1 | 0 | 0 | 1 | 1 | +| 52 | 1 | 1 | 0 | 1 | 0 | 0 | +| 53 | 1 | 1 | 0 | 1 | 0 | 1 | +| 54 | 1 | 1 | 0 | 1 | 1 | 0 | +| 55 | 1 | 1 | 0 | 1 | 1 | 1 | +| 56 | 1 | 1 | 1 | 0 | 0 | 0 | +| 57 | 1 | 1 | 1 | 0 | 0 | 1 | +| 58 | 1 | 1 | 1 | 0 | 1 | 0 | +| 59 | 1 | 1 | 1 | 0 | 1 | 1 | +| 60 | 1 | 1 | 1 | 1 | 0 | 0 | +| 61 | 1 | 1 | 1 | 1 | 0 | 1 | +| 62 | 1 | 1 | 1 | 1 | 1 | 0 | +| 63 | 1 | 1 | 1 | 1 | 1 | 1 | ++----+---+---+---+---+---+---+ +*/ + myNdbOperation->insertTuple(); + myNdbOperation->equal(COL_NAME[0], num); + for(int col = 1; col < COL_LEN; col++) + { + myNdbOperation->setValue(COL_NAME[col], (num>>(COL_LEN-1-col))&1); + } + } + + int check = 
myTrans->execute(NdbTransaction::Commit); + + myTrans->close(); + + if (check == -1) + return NDBT_FAILED; + else + return NDBT_OK; + +} + + + +/* +* a=AND, o=OR, A=NAND, O=NOR +*/ +char op_string[] = "aoAO"; +/* +* the six columns' names of the test table +*/ +char col_string[] = "ijklmn"; +const int op_len = strlen(op_string); +const int col_len = strlen(col_string); + +/* +* get a random op from "aoAO" +*/ +int get_rand_op_ch(char *ch) +{ + static unsigned int num = 0; + if(++num == 0) + num = 1; + srand(num*time(NULL)); + *ch = op_string[rand() % op_len]; + return 1; +} + +/* +* get a random ordering of "ijklmn" through exchanging letters +*/ +void change_col_order() +{ + int pos1,pos2; + char temp; + for (int i = 0; i < 10; i++) //exchange 10 times + { + srand(time(NULL)/(i+1)); + pos1 = rand() % col_len; + srand((i+1)*time(NULL)); + pos2 = rand() % col_len; + if (pos1 == pos2) + continue; + temp = col_string[pos1]; + col_string[pos1] = col_string[pos2]; + col_string[pos2] = temp; + } +} + +/* +* get a random sub string of "ijklmn" +*/ +int get_rand_col_str(char *str) +{ + int len; + static unsigned int num = 0; + if(++num == 0) + num = 1; + srand(num*time(NULL)); + len = rand() % col_len + 1; + change_col_order(); + snprintf(str, len+1, "%s", col_string); //len+1, including '\0' + return len; +} + +/* +* get a random string including operation and columns +* e.g., Alnikx +*/ +int get_rand_op_str(char *str) +{ + char temp[256]; + int len1, len2, len; + len1 = get_rand_op_ch(temp); + len2 = get_rand_col_str(temp+len1); + len = len1 + len2; + temp[len] = 'x'; + snprintf(str, len+1+1, "%s", temp); //len+1, including '\0' + return len+1; +} + +/* +* replace a letter of the source string with a new string +* e.g., source string: 'Aijkx', replace i with new string 'olmx' +* then source string is changed to 'Aolmxjkx' +* source: its format should be produced from get_rand_op_str() +* pos: range from 1 to strlen(source)-2 +*/ +int replace_a_to_str(char *source, int pos, char *newstr) +{ + char temp[MAX_STR_LEN]; + snprintf(temp, pos+1, "%s", source); + snprintf(temp+pos, strlen(newstr)+1, "%s", newstr); + snprintf(temp+pos+strlen(newstr), strlen(source)-pos, "%s", source+pos+1); + snprintf(source, strlen(temp)+1, "%s", temp); + return strlen(source); +} + +/* +* check whether the input char is an operation +*/ +bool check_op(char ch) +{ + if( ch == 'a' || ch == 'A' || ch == 'o' || ch == 'O') + return true; + else + return false; +} + +/* +* check whether the input char is the end flag +*/ +bool check_end(char ch) +{ + return (ch == 'x'); +} + +/* +* check whether the input char is a column letter +*/ +bool check_col(char ch) +{ + if( ch == 'i' || ch == 'j' || ch == 'k' + || ch == 'l' || ch == 'm' || ch == 'n' ) + return true; + else + return false; +} + +/* +* To ensure we can get a random string with RECURSIVE_LEVEL, +* we need a position where we can replace a letter with a new string. +*/ +int get_rand_replace_pos(char *str, int len) +{ + int pos_op = 0; + int pos_x = 0; + int pos_col = 0; + int span = 0; + static int num = 0; + char temp; + + for(int i = 0; i < len; i++) + { + temp = str[i]; + if(! 
check_end(temp)) + { + if(check_op(temp)) + pos_op = i; + } + else + { + pos_x = i; + break; + } + } + + if(++num == 0) + num = 1; + + span = pos_x - pos_op - 1; + if(span <= 1) + { + pos_col = pos_op + 1; + } + else + { + srand(num*time(NULL)); + pos_col = pos_op + rand() % span + 1; + } + return pos_col; +} + +/* +* Check whether the given random string is valid +* and applicable for this test case +*/ +bool check_random_str(char *str) +{ + char *p; + int op_num = 0; + int end_num = 0; + + for(p = str; *p; p++) + { + bool tmp1 = false, tmp2 = false; + if(tmp1 = check_op(*p)) + op_num++; + if(tmp2 = check_end(*p)) + end_num++; + if(!(tmp1 || tmp2 || check_col(*p))) //there are illegal letters + return false; + } + + if(op_num != end_num) //operator count does not match end-flag count + return false; + + return true; +} + +/* +* Get a random string with RECURSIVE_LEVEL +*/ +void get_rand_op_str_compound(char *str) +{ + char small_str[256]; + int pos; + int tmp; + int level; + static int num = 0; + + if(++num == 0) + num = 1; + + srand(num*time(NULL)); + level = 1 + rand() % RECURSIVE_LEVEL; + + get_rand_op_str(str); + + for(int i = 0; i < level; i++) + { + get_rand_op_str(small_str); + tmp = strlen(small_str); + get_rand_op_str(small_str + tmp); //get two operations + pos = get_rand_replace_pos(str, strlen(str)); + replace_a_to_str(str, pos, small_str); + } + + //check the random string + if(!check_random_str(str)) + { + fprintf(stderr, "Error random string! \n"); + exit(-1); + } +} + +/* +* get column id of i,j,k,l,m,n +*/ +int get_column_id(char ch) +{ + return (ch - 'i' + 1); //from 1 to 6 +} + +/* +* check whether the column value of the given tuple is equal to 1 +* col_id: column id, range from 1 to 6 +* tuple_no: record NO., range from 0 to 63 +*/ +bool check_col_equal_one(int tuple_no, int col_id) +{ + int i = (int)pow((double)2, (double)(6 - col_id)); + int j = tuple_no / i; + if(j % 2) + return true; + else + return false; +} + +/* +* AND together all elements in the array +* value: pointer to a bool array +* len: length of the bool array +*/ +bool AND_op(bool *value, int len) +{ + for(int i = 0; i < len; i++) + { + if(! value[i]) + return false; + } + return true; +} + +/* +* OR together all elements in the array +* value: pointer to a bool array +* len: length of the bool array +*/ +bool OR_op(bool *value, int len) +{ + for(int i = 0; i < len; i++) + { + if(value[i]) + return true; + } + return false; +} + +/* +* NAND of all elements in the array (negated AND) +* value: pointer to a bool array +* len: length of the bool array +*/ +bool NAND_op(bool *value, int len) +{ + return (! AND_op(value, len)); +} + +/* +* NOR of all elements in the array (negated OR) +* value: pointer to a bool array +* len: length of the bool array +*/ +bool NOR_op(bool *value, int len) +{ + return (! 
OR_op(value, len)); +} + +/* +* AND/NAND/OR/NOR operation for a bool array +*/ +bool calculate_one_op(char op_type, bool *value, int len) +{ + switch(op_type) + { + case 'a': + return AND_op(value, len); + break; + case 'o': + return OR_op(value, len); + break; + case 'A': + return NAND_op(value, len); + break; + case 'O': + return NOR_op(value, len); + break; + } + return false; //make gcc happy +} + +typedef struct _stack_element +{ + char type; + int num; +}stack_element; + +/* +* stack_op, store info for AND,OR,NAND,NOR +* stack_col, store value of column(i,j,k,l,m,n) and temporary result for an operation +*/ +stack_element stack_op[RECURSIVE_LEVEL * COL_LEN]; +bool stack_col[RECURSIVE_LEVEL * COL_LEN * 2]; + +/* +* check whether the given tuple is chosen by the judgement condition +* tuple_no: the NO. of the tuple in TABLE_NAME, range from 0 to TUPLE_NUM-1 +* str: a random string of scan operation and condition +* len: length of str +*/ +bool check_one_tuple(int tuple_no, char *str, int len) +{ + int pop_op = 0; + int pop_col = 0; + for(int i = 0; i < len; i++) + { + char letter = *(str + i); + if(check_op(letter)) //push + { + stack_op[pop_op].type = letter; + stack_op[pop_op].num = 0; + pop_op++; + } + if(check_col(letter)) //push + { + stack_col[pop_col] = check_col_equal_one(tuple_no, get_column_id(letter)); + pop_col++; + stack_op[pop_op-1].num += 1; + } + if(check_end(letter)) + { + if(pop_op <= 1) + { + return calculate_one_op(stack_op[pop_op-1].type, + stack_col, + stack_op[pop_op-1].num); + } + else + { + bool tmp1 = calculate_one_op(stack_op[pop_op-1].type, + stack_col + pop_col - stack_op[pop_op-1].num, + stack_op[pop_op-1].num); + pop_col -= stack_op[pop_op-1].num; //pop + pop_op--; + stack_col[pop_col] = tmp1; //push + pop_col++; + stack_op[pop_op-1].num += 1; + } + } + } + return false; //make gcc happy +} + +/* +* get lists of tuples which match the scan condition through calculation +* str: a random string of scan operation and condition +*/ +void check_all_tuples(char *str, bool *res) +{ + for (int i = 0; i < TUPLE_NUM; i++) + { + if(check_one_tuple(i, str, strlen(str))) + res[i] = true; + } +} + +/* +* convert a letter to the group number that ndbapi needs +*/ +NdbScanFilter::Group get_api_group(char op_name) +{ + switch (op_name) { + case 'a': return NdbScanFilter::AND; + case 'o': return NdbScanFilter::OR; + case 'A': return NdbScanFilter::NAND; + case 'O': return NdbScanFilter::NOR; + default: + fprintf(stderr, "Invalid group name %c !\n", op_name); + exit(3); + } +} + +/* +* with ndbapi, call begin, eq/ne/lt/gt/le/ge..., end +*/ +NdbScanFilter * call_ndbapi(char *str, NdbTransaction *transaction, + NdbScanOperation *scan, NdbDictionary::Column const *col[]) +{ + NdbScanFilter *scanfilter = new NdbScanFilter(scan); + char *p; + + for (p = str; *p; p++) + { + if(check_op(*p)) + { + if(scanfilter->begin(get_api_group(*p))) + ERR_EXIT(transaction, "filter begin() failed"); + } + if(check_col(*p)) + { + if(scanfilter->eq(col[*p-'i'+1]->getColumnNo(), (Uint32)1)) + ERR_EXIT(transaction, "filter eq() failed"); + } + if(check_end(*p)) + { + if(scanfilter->end()) + ERR_EXIT(transaction, "filter end() failed"); + } + } + + return scanfilter; +} + +/* +* get the tuples through ndbapi, and save the tuple numbers 
+* str: a random string of scan operation and condition +*/ +void ndbapi_tuples(Ndb *ndb, char *str, bool *res) +{ + const NdbDictionary::Dictionary *dict = ndb->getDictionary(); + if (!dict) + ERR_EXIT(ndb, "Can't get dict"); + + const NdbDictionary::Table *table = dict->getTable(TABLE_NAME); + if (!table) + ERR_EXIT(dict, "Can't get table " TABLE_NAME); + + const NdbDictionary::Column *col[COL_LEN]; + for(int i = 0; i < COL_LEN; i++) + { + char tmp[128]; + col[i] = table->getColumn(COL_NAME[i]); + if(!col[i]) + { + snprintf(tmp, 128, "Can't get column %s", COL_NAME[i]); + ERR_EXIT(dict, tmp); + } + } + + NdbTransaction *transaction; + NdbScanOperation *scan; + NdbScanFilter *filter; + + transaction = ndb->startTransaction(); + if (!transaction) + ERR_EXIT(ndb, "Can't start transaction"); + + scan = transaction->getNdbScanOperation(table); + if (!scan) + ERR_EXIT(transaction, "Can't get scan op"); + + if (scan->readTuples(NdbOperation::LM_Exclusive)) + ERR_EXIT(scan, "Can't set up read"); + + NdbRecAttr *rec[COL_LEN]; + for(int i = 0; i < COL_LEN; i++) + { + char tmp[128]; + rec[i] = scan->getValue(COL_NAME[i]); + if(!rec[i]) + { + snprintf(tmp, 128, "Can't get rec of %s", COL_NAME[i]); + ERR_EXIT(scan, tmp); + } + } + + filter = call_ndbapi(str, transaction, scan, col); + + if (transaction->execute(NdbTransaction::NoCommit)) + ERR_EXIT(transaction, "Can't execute"); + + int i,j,k,l,m,n; + while (scan->nextResult(true) == 0) + { + do + { + i = rec[1]->u_32_value(); + j = rec[2]->u_32_value(); + k = rec[3]->u_32_value(); + l = rec[4]->u_32_value(); + m = rec[5]->u_32_value(); + n = rec[6]->u_32_value(); + res[32*i+16*j+8*k+4*l+2*m+n] = true; + } while (scan->nextResult(false) == 0); + } + + delete filter; + transaction->close(); +} + +/* +* compare the result between calculation and NDBAPI +* str: a random string of scan operation and condition +* return: true stands for ndbapi ok, false stands for ndbapi failed +*/ +template class Vector<bool>; +bool compare_cal_ndb(char *str, Ndb *ndb) +{ + Vector<bool> res_cal; + Vector<bool> res_ndb; + + for(int i = 0; i < TUPLE_NUM; i++) + { + res_cal.push_back(false); + res_ndb.push_back(false); + } + + check_all_tuples(str, res_cal.getBase()); + ndbapi_tuples(ndb, str, res_ndb.getBase()); + + for(int i = 0; i < TUPLE_NUM; i++) + { + if(res_cal[i] != res_ndb[i]) + return false; + } + return true; +} + + +int runCreateTables(NDBT_Context* ctx, NDBT_Step* step) +{ + Ndb *pNdb = GETNDB(step); + pNdb->getDictionary()->dropTable(MYTAB1.getName()); + int ret = createTable(pNdb, &MYTAB1, false, true, 0); + if(ret) + return ret; + return NDBT_OK; +} + + +int runDropTables(NDBT_Context* ctx, NDBT_Step* step) +{ + int ret = GETNDB(step)->getDictionary()->dropTable(MYTAB1.getName()); + if(ret == -1) + return NDBT_FAILED; + + return NDBT_OK; +} + +int runScanRandomFilterTest(NDBT_Context* ctx, NDBT_Step* step) +{ + char random_str[MAX_STR_LEN]; + Ndb *myNdb = GETNDB(step); + bool res = true; + + for(int i = 0; i < TEST_NUM; i++) + { + get_rand_op_str_compound(random_str); + if( !compare_cal_ndb(random_str, myNdb)) + return NDBT_FAILED; + } + + return NDBT_OK; +} + +NDBT_TESTSUITE(testScanFilter); +TESTCASE(TEST_NAME, + "Scan table TABLE_NAME for the records that match the \ + conditions of logical scan operations: AND/OR/NAND/NOR") +{ + INITIALIZER(runCreateTables); + INITIALIZER(runPopulate); + INITIALIZER(runScanRandomFilterTest); + FINALIZER(runDropTables); +} + +NDBT_TESTSUITE_END(testScanFilter); + + +int main(int argc, const char** argv) +{ + 
ndb_init(); + + Ndb_cluster_connection con; + if(con.connect(12, 5, 1)) + { + return NDBT_ProgramExit(NDBT_FAILED); + } + + return testScanFilter.executeOneCtx(con, &MYTAB1, TEST_NAME); +} diff --git a/storage/ndb/test/ndbapi/testSystemRestart.cpp b/storage/ndb/test/ndbapi/testSystemRestart.cpp index 67429ee0a41..3cd7a3798c5 100644 --- a/storage/ndb/test/ndbapi/testSystemRestart.cpp +++ b/storage/ndb/test/ndbapi/testSystemRestart.cpp @@ -1220,6 +1220,80 @@ runBug24664(NDBT_Context* ctx, NDBT_Step* step) return result; } +int +runBug29167(NDBT_Context* ctx, NDBT_Step* step) +{ + int result = NDBT_OK; + NdbRestarter restarter; + Ndb* pNdb = GETNDB(step); + const Uint32 nodeCount = restarter.getNumDbNodes(); + + if (nodeCount < 2) + return NDBT_OK; + + int filter[] = { 15, NDB_MGM_EVENT_CATEGORY_CHECKPOINT, 0 }; + NdbLogEventHandle handle = + ndb_mgm_create_logevent_handle(restarter.handle, filter); + + struct ndb_logevent event; + int master = restarter.getMasterNodeId(); + do { + int node1 = restarter.getRandomNodeOtherNodeGroup(master, rand()); + int node2 = restarter.getRandomNodeSameNodeGroup(node1, rand()); + + int val2[] = { DumpStateOrd::CmvmiSetRestartOnErrorInsert, 1 }; + restarter.dumpStateAllNodes(val2, 2); + int dump[] = { DumpStateOrd::DihSetTimeBetweenGcp, 30000 }; + restarter.dumpStateAllNodes(dump, 2); + + while(ndb_logevent_get_next(handle, &event, 0) >= 0 && + event.type != NDB_LE_GlobalCheckpointCompleted); + + CHECK(restarter.insertErrorInAllNodes(932) == 0); + + CHECK(restarter.insertErrorInNode(node1, 7183) == 0); + CHECK(restarter.insertErrorInNode(node2, 7183) == 0); + + CHECK(restarter.waitClusterNoStart() == 0); + restarter.startAll(); + CHECK(restarter.waitClusterStarted() == 0); + } while(false); + + return result; +} + +int +runBug28770(NDBT_Context* ctx, NDBT_Step* step) { + Ndb* pNdb = GETNDB(step); + NdbRestarter restarter; + int result = NDBT_OK; + int count = 0; + Uint32 i = 0; + Uint32 loops = ctx->getNumLoops(); + int records = ctx->getNumRecords(); + UtilTransactions utilTrans(*ctx->getTab()); + HugoTransactions hugoTrans(*ctx->getTab()); + + g_info << "Loading records..." << endl; hugoTrans.loadTable(pNdb, + records); + + + while(i<=loops && result != NDBT_FAILED){ + g_info << "Loop " << i << "/"<< loops <<" started" << endl; + CHECK(restarter.restartAll(false, true, false) == 0); + NdbSleep_SecSleep(3); + CHECK(restarter.waitClusterNoStart() == 0); + restarter.insertErrorInAllNodes(6007); + CHECK(restarter.startAll()== 0); + CHECK(restarter.waitClusterStarted() == 0); + CHECK(utilTrans.selectCount(pNdb, 64, &count) == 0); + CHECK(count == records); + i++; + } + ndbout << " runBug28770 finished" << endl; + return result; +} + int runStopper(NDBT_Context* ctx, NDBT_Step* step) { @@ -1667,6 +1741,26 @@ TESTCASE("SR_DD_2b_LCP", "") STEP(runSR_DD_2); FINALIZER(runClearTable); } +TESTCASE("Bug29167", "") +{ + INITIALIZER(runWaitStarted); + STEP(runBug29167); +} +TESTCASE("Bug28770", + "Check readTableFile1 fails, readTableFile2 succeeds\n" + "1. Restart all node -nostart\n" + "2. Insert error 6100 into all nodes\n" + "3. Start all nodes\n" + "4. Ensure cluster start\n" + "5. Read and verify reocrds\n" + "6. 
Repeat until looping is completed\n"){ + INITIALIZER(runWaitStarted); + INITIALIZER(runClearTable); + STEP(runBug28770); + FINALIZER(runClearTable); +} + + NDBT_TESTSUITE_END(testSystemRestart); int main(int argc, const char** argv){ diff --git a/storage/ndb/test/ndbapi/test_event_merge.cpp b/storage/ndb/test/ndbapi/test_event_merge.cpp index 662b1eb6f4c..d40b985adc2 100644 --- a/storage/ndb/test/ndbapi/test_event_merge.cpp +++ b/storage/ndb/test/ndbapi/test_event_merge.cpp @@ -2184,57 +2184,57 @@ my_long_options[] = { NDB_STD_OPTS("test_event_merge"), { "abort-on-error", 1001, "Do abort() on any error", - (gptr*)&g_opts.abort_on_error, (gptr*)&g_opts.abort_on_error, 0, + (uchar **)&g_opts.abort_on_error, (uchar **)&g_opts.abort_on_error, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "loglevel", 1002, "Logging level in this program 0-3 (default 0)", - (gptr*)&g_opts.loglevel, (gptr*)&g_opts.loglevel, 0, + (uchar **)&g_opts.loglevel, (uchar **)&g_opts.loglevel, 0, GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { "loop", 1003, "Number of test loops (default 5, 0=forever)", - (gptr*)&g_opts.loop, (gptr*)&g_opts.loop, 0, + (uchar **)&g_opts.loop, (uchar **)&g_opts.loop, 0, GET_INT, REQUIRED_ARG, 5, 0, 0, 0, 0, 0 }, { "maxops", 1004, "Approx number of PK operations per table (default 1000)", - (gptr*)&g_opts.maxops, (gptr*)&g_opts.maxops, 0, + (uchar **)&g_opts.maxops, (uchar **)&g_opts.maxops, 0, GET_UINT, REQUIRED_ARG, 1000, 0, 0, 0, 0, 0 }, { "maxpk", 1005, "Number of different PK values (default 10, max 1000)", - (gptr*)&g_opts.maxpk, (gptr*)&g_opts.maxpk, 0, + (uchar **)&g_opts.maxpk, (uchar **)&g_opts.maxpk, 0, GET_UINT, REQUIRED_ARG, 10, 0, 0, 0, 0, 0 }, { "maxtab", 1006, "Number of tables (default 10, max 100)", - (gptr*)&g_opts.maxtab, (gptr*)&g_opts.maxtab, 0, + (uchar **)&g_opts.maxtab, (uchar **)&g_opts.maxtab, 0, GET_INT, REQUIRED_ARG, 10, 0, 0, 0, 0, 0 }, { "no-blobs", 1007, "Omit blob attributes (5.0: true)", - (gptr*)&g_opts.no_blobs, (gptr*)&g_opts.no_blobs, 0, + (uchar **)&g_opts.no_blobs, (uchar **)&g_opts.no_blobs, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "no-implicit-nulls", 1008, "Insert must include all attrs" " i.e. no implicit NULLs", - (gptr*)&g_opts.no_implicit_nulls, (gptr*)&g_opts.no_implicit_nulls, 0, + (uchar **)&g_opts.no_implicit_nulls, (uchar **)&g_opts.no_implicit_nulls, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "no-missing-update", 1009, "Update must include all non-PK attrs", - (gptr*)&g_opts.no_missing_update, (gptr*)&g_opts.no_missing_update, 0, + (uchar **)&g_opts.no_missing_update, (uchar **)&g_opts.no_missing_update, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "no-multiops", 1010, "Allow only 1 operation per commit", - (gptr*)&g_opts.no_multiops, (gptr*)&g_opts.no_multiops, 0, + (uchar **)&g_opts.no_multiops, (uchar **)&g_opts.no_multiops, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "no-nulls", 1011, "Create no NULL values", - (gptr*)&g_opts.no_nulls, (gptr*)&g_opts.no_nulls, 0, + (uchar **)&g_opts.no_nulls, (uchar **)&g_opts.no_nulls, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "one-blob", 1012, "Only one blob attribute (default 2)", - (gptr*)&g_opts.one_blob, (gptr*)&g_opts.one_blob, 0, + (uchar **)&g_opts.one_blob, (uchar **)&g_opts.one_blob, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "opstring", 1013, "Operations to run e.g. 
idiucdc (c is commit) or" " iuuc:uudc (the : separates loops)", - (gptr*)&g_opts.opstring, (gptr*)&g_opts.opstring, 0, + (uchar **)&g_opts.opstring, (uchar **)&g_opts.opstring, 0, GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { "seed", 1014, "Random seed (0=loop number, default -1=random)", - (gptr*)&g_opts.seed, (gptr*)&g_opts.seed, 0, + (uchar **)&g_opts.seed, (uchar **)&g_opts.seed, 0, GET_INT, REQUIRED_ARG, -1, 0, 0, 0, 0, 0 }, { "separate-events", 1015, "Do not combine events per GCI (5.0: true)", - (gptr*)&g_opts.separate_events, (gptr*)&g_opts.separate_events, 0, + (uchar **)&g_opts.separate_events, (uchar **)&g_opts.separate_events, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "tweak", 1016, "Whatever the source says", - (gptr*)&g_opts.tweak, (gptr*)&g_opts.tweak, 0, + (uchar **)&g_opts.tweak, (uchar **)&g_opts.tweak, 0, GET_UINT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { "use-table", 1017, "Use existing tables", - (gptr*)&g_opts.use_table, (gptr*)&g_opts.use_table, 0, + (uchar **)&g_opts.use_table, (uchar **)&g_opts.use_table, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, diff --git a/storage/ndb/test/odbc/SQL99_test/SQL99_test.cpp b/storage/ndb/test/odbc/SQL99_test/SQL99_test.cpp index 039a77f4d53..fb77220773d 100644 --- a/storage/ndb/test/odbc/SQL99_test/SQL99_test.cpp +++ b/storage/ndb/test/odbc/SQL99_test/SQL99_test.cpp @@ -27,7 +27,14 @@ using namespace std; // #define MAXROW 64 #define DEFROW 8 -#define MAXTHREADS 24 +/* + NDB_MAXTHREADS used to be just MAXTHREADS, which collides with a + #define from <sys/thread.h> on AIX (IBM compiler). We explicitly + #undef it here lest someone use it by habit and get really funny + results. K&R says we may #undef non-existent symbols, so let's go. +*/ +#undef MAXTHREADS +#define NDB_MAXTHREADS 24 #define DEFTHREADS 2 #define MAXTABLES 16 @@ -83,7 +90,7 @@ int main(int argc, char* argv[]){ char* szTableNames = (char*)malloc(sizeof(char)*nNoOfTables*MAX_TABLE_NAME) ; memset(szTableNames, 0, sizeof(char)*nNoOfTables*MAX_TABLE_NAME) ; - UintPtr pThreadHandles[MAXTHREADS] = { NULL } ; + UintPtr pThreadHandles[NDB_MAXTHREADS] = { NULL } ; AssignTableNames(szTableNames, nNoOfTables) ; @@ -313,7 +320,7 @@ void ParseArguments(int argc, const char** argv){ if (strcmp(argv[i], "-t") == 0) { nNoOfThreads = atoi(argv[i+1]); - if ((nNoOfThreads < 1) || (nNoOfThreads > MAXTHREADS)) + if ((nNoOfThreads < 1) || (nNoOfThreads > NDB_MAXTHREADS)) nNoOfThreads = DEFTHREADS ; } else if (strcmp(argv[i], "-c") == 0) diff --git a/storage/ndb/test/run-test/Makefile.am b/storage/ndb/test/run-test/Makefile.am index b5cb69d266e..65aa62d11f0 100644 --- a/storage/ndb/test/run-test/Makefile.am +++ b/storage/ndb/test/run-test/Makefile.am @@ -18,20 +18,18 @@ testdir=$(prefix)/mysql-test/ndb include $(top_srcdir)/storage/ndb/config/common.mk.am include $(top_srcdir)/storage/ndb/config/type_util.mk.am include $(top_srcdir)/storage/ndb/config/type_mgmapiclient.mk.am +include $(top_srcdir)/storage/ndb/config/type_ndbapitools.mk.am test_PROGRAMS = atrt test_DATA=daily-basic-tests.txt daily-devel-tests.txt 16node-tests.txt \ - conf-daily-basic-ndb08.txt \ - conf-daily-devel-ndb08.txt \ - conf-daily-sql-ndb08.txt \ - conf-ndbmaster.txt \ - conf-shark.txt \ - conf-dl145a.txt + conf-ndbmaster.cnf \ + conf-dl145a.cnf test-tests.txt conf-test.cnf test_SCRIPTS=atrt-analyze-result.sh atrt-gather-result.sh atrt-setup.sh \ - atrt-clear-result.sh make-config.sh make-index.sh make-html-reports.sh + atrt-clear-result.sh autotest-run.sh + +atrt_SOURCES = main.cpp setup.cpp 
files.cpp -atrt_SOURCES = main.cpp run-test.hpp INCLUDES_LOC = -I$(top_srcdir)/storage/ndb/test/include LDADD_LOC = $(top_builddir)/storage/ndb/test/src/libNDBT.a \ $(top_builddir)/storage/ndb/src/libndbclient.la \ @@ -39,10 +37,18 @@ LDADD_LOC = $(top_builddir)/storage/ndb/test/src/libNDBT.a \ $(top_builddir)/mysys/libmysys.a \ $(top_builddir)/strings/libmystrings.a @NDB_SCI_LIBS@ +atrt_CXXFLAGS = -I$(top_srcdir)/ndb/src/mgmapi \ + -I$(top_srcdir)/ndb/src/mgmsrv \ + -I$(top_srcdir)/ndb/include/mgmcommon \ + -DMYSQLCLUSTERDIR="\"\"" \ + -DDEFAULT_PREFIX="\"$(prefix)\"" + +atrt_LDFLAGS = -static @ndb_bin_am_ldflags@ + wrappersdir=$(prefix)/bin wrappers_SCRIPTS=atrt-testBackup atrt-mysql-test-run -EXTRA_DIST = $(test_DATA) $(test_SCRIPTS) $(wrappers_SCRIPTS) README.ATRT +EXTRA_DIST = $(test_DATA) $(test_SCRIPTS) $(wrappers_SCRIPTS) README.ATRT atrt.hpp # Don't update the files from bitkeeper %::SCCS/s.% diff --git a/storage/ndb/test/run-test/atrt-gather-result.sh b/storage/ndb/test/run-test/atrt-gather-result.sh index 93d4ae428d0..f2473578b41 100755 --- a/storage/ndb/test/run-test/atrt-gather-result.sh +++ b/storage/ndb/test/run-test/atrt-gather-result.sh @@ -8,7 +8,7 @@ rm -rf * while [ $# -gt 0 ] do - rsync -a "$1" . + rsync -a --exclude='BACKUP' --exclude='ndb_*_fs' "$1" . shift done diff --git a/storage/ndb/test/run-test/atrt.hpp b/storage/ndb/test/run-test/atrt.hpp new file mode 100644 index 00000000000..14d2dccd245 --- /dev/null +++ b/storage/ndb/test/run-test/atrt.hpp @@ -0,0 +1,161 @@ +/* Copyright (C) 2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#ifndef atrt_config_hpp +#define atrt_config_hpp + +#include <ndb_global.h> +#include <Vector.hpp> +#include <BaseString.hpp> +#include <Logger.hpp> +#include <mgmapi.h> +#include <CpcClient.hpp> +#include <Properties.hpp> + +enum ErrorCodes +{ + ERR_OK = 0, + ERR_NDB_FAILED = 101, + ERR_SERVERS_FAILED = 102, + ERR_MAX_TIME_ELAPSED = 103 +}; + +struct atrt_host +{ + size_t m_index; + BaseString m_user; + BaseString m_basedir; + BaseString m_hostname; + SimpleCpcClient * m_cpcd; + Vector<struct atrt_process*> m_processes; +}; + +struct atrt_options +{ + enum Feature { + AO_REPLICATION = 1, + AO_NDBCLUSTER = 2 + }; + + int m_features; + Properties m_loaded; + Properties m_generated; +}; + +struct atrt_process +{ + size_t m_index; + struct atrt_host * m_host; + struct atrt_cluster * m_cluster; + + enum Type { + AP_ALL = 255 + ,AP_NDBD = 1 + ,AP_NDB_API = 2 + ,AP_NDB_MGMD = 4 + ,AP_MYSQLD = 16 + ,AP_CLIENT = 32 + ,AP_CLUSTER = 256 // Used for options parsing for "cluster" options + } m_type; + + SimpleCpcClient::Process m_proc; + + NdbMgmHandle m_ndb_mgm_handle; // if type == ndb_mgm + atrt_process * m_mysqld; // if type == client + atrt_process * m_rep_src; // if type == mysqld + Vector<atrt_process*> m_rep_dst; // if type == mysqld + + atrt_options m_options; +}; + +struct atrt_cluster +{ + BaseString m_name; + BaseString m_dir; + Vector<atrt_process*> m_processes; + atrt_options m_options; +}; + +struct atrt_config +{ + bool m_generated; + BaseString m_key; + BaseString m_replication; + Vector<atrt_host*> m_hosts; + Vector<atrt_cluster*> m_clusters; + Vector<atrt_process*> m_processes; +}; + +struct atrt_testcase +{ + bool m_report; + bool m_run_all; + time_t m_max_time; + BaseString m_command; + BaseString m_args; + BaseString m_name; +}; + +extern Logger g_logger; + +void require(bool x); +bool parse_args(int argc, char** argv); +bool setup_config(atrt_config&); +bool configure(atrt_config&, int setup); +bool setup_directories(atrt_config&, int setup); +bool setup_files(atrt_config&, int setup, int sshx); + +bool deploy(atrt_config&); +bool sshx(atrt_config&, unsigned procmask); +bool start(atrt_config&, unsigned procmask); + +bool remove_dir(const char *, bool incl = true); +bool connect_hosts(atrt_config&); +bool connect_ndb_mgm(atrt_config&); +bool wait_ndb(atrt_config&, int ndb_mgm_node_status); +bool start_processes(atrt_config&, int); +bool stop_processes(atrt_config&, int); +bool update_status(atrt_config&, int); +int is_running(atrt_config&, int); +bool gather_result(atrt_config&, int * result); + +bool read_test_case(FILE *, atrt_testcase&, int& line); +bool setup_test_case(atrt_config&, const atrt_testcase&); + +bool setup_hosts(atrt_config&); + +/** + * Global variables... 
+ */ +extern Logger g_logger; +extern atrt_config g_config; + +extern const char * g_cwd; +extern const char * g_my_cnf; +extern const char * g_user; +extern const char * g_basedir; +extern const char * g_prefix; +extern int g_baseport; +extern int g_fqpn; +extern int g_default_ports; + +extern const char * g_clusters; + +extern const char *save_file; +extern const char *save_group_suffix; +extern char *save_extra_file; + +#endif diff --git a/storage/ndb/test/run-test/autotest-boot.sh b/storage/ndb/test/run-test/autotest-boot.sh new file mode 100644 index 00000000000..1df3a26cc8a --- /dev/null +++ b/storage/ndb/test/run-test/autotest-boot.sh @@ -0,0 +1,188 @@ +#!/bin/sh +############################################################# +# This script created by Jonas does the following # +# Cleans up clones and previous builds, pulls new clones, # +# builds, deploys, configures the tests and launches ATRT # +############################################################# + +############### +#Script setup # +############### + +save_args=$* +VERSION="autotest-boot.sh version 1.00" + +DATE=`date '+%Y-%m-%d'` +HOST=`hostname -s` +export DATE HOST + +set -e + +echo "`date` starting: $*" + +verbose=0 +do_clone=yes +build=yes + +tag= +conf= +extra_args= +extra_clone= +LOCK=$HOME/.autotest-lock + +############################ +# Read command line entries# +############################ + +while [ "$1" ] +do + case "$1" in + --no-clone) do_clone="";; + --no-build) build="";; + --verbose) verbose=`expr $verbose + 1`;; + --clone=*) clone=`echo $1 | sed s/--clone=//`;; + --version) echo $VERSION; exit;; + --conf=*) conf=`echo $1 | sed s/--conf=//`;; + --tag=*) tag=`echo $1 | sed s/--tag=//`;; + --*) echo "Unknown arg: $1";; + *) RUN=$*;; + esac + shift +done + +################################# +#Make sure the config file # +#exists; if it does not, exit; # +#if it does, source (.) it # +################################# +if [ -z "$conf" ] +then + if [ -f "`pwd`/autotest.conf" ] + then + conf="`pwd`/autotest.conf" + elif [ -f "$HOME/autotest.conf" ] + then + conf="$HOME/autotest.conf" + fi +fi + +if [ -f $conf ] +then + . $conf +else + echo "Can't find config file: >$conf<" + exit +fi + +############################### +# Validate that all interesting +# variables were set in conf +############################### +vars="src_clone_base install_dir build_dir" +for i in $vars +do + t=`echo echo \\$$i` + if [ -z "`eval $t`" ] + then + echo "Invalid config: $conf, variable $i is not set" + exit + fi +done + +############################### +#Print out the environment # +#variables # +############################### + +if [ $verbose -gt 0 ] +then + env +fi + +#################################### +# Setup the lock file name and path# +# Setup the clone source location # +#################################### + +src_clone=${src_clone_base}${clone} + +####################################### +# Check to see if the lock file exists# +# If it does, exit. 
# +####################################### + +if [ -f $LOCK ] +then + echo "Lock file exists: $LOCK" + exit 1 +fi + +####################################### +# If the lock file does not exist then# +# create it with date and run info # +####################################### + +echo "$DATE $RUN" > $LOCK + +############################# +#If any errors occur from # +#here down, trap them, and # +#remove the lock file before# +#exit # +############################# +if [ `uname -s` != "SunOS" ] +then + trap "rm -f $LOCK" ERR +fi + +# You can add more to this path# +################################ + +if [ -z "$tag" ] +then + dst_place=${build_dir}/clone-$clone-$DATE.$$ +else + dst_place=${build_dir}/clone-$tag-$DATE.$$ + extra_args="$extra_args --clone=$tag" + extra_clone="-r$tag" +fi + +######################################### +# Delete source and pull down the latest# +######################################### + +if [ "$do_clone" ] +then + rm -rf $dst_place + if [ `echo $src_clone | grep -c 'file:\/\/'` = 1 ] + then + bk clone -l $extra_clone $src_clone $dst_place + else + bk clone $extra_clone $src_clone $dst_place + fi +fi + +########################################## +# Build the source, make installs, and # +# create the database to be rsynced # +########################################## + +if [ "$build" ] +then + cd $dst_place + rm -rf $install_dir + BUILD/compile-ndb-autotest --prefix=$install_dir + make install +fi + + +################################ +# Start run script # +################################ + +script=$install_dir/mysql-test/ndb/autotest-run.sh +sh -x $script $save_args --conf=$conf --install-dir=$install_dir --suite=$RUN --nolock $extra_args + +if [ "$build" ] +then + rm -rf $dst_place +fi +rm -f $LOCK diff --git a/storage/ndb/test/run-test/autotest-run.sh b/storage/ndb/test/run-test/autotest-run.sh new file mode 100644 index 00000000000..b543cd1efb9 --- /dev/null +++ b/storage/ndb/test/run-test/autotest-run.sh @@ -0,0 +1,270 @@ +#!/bin/sh +############################################################# +# This script created by Jonas does the following # +# Cleans up clones and previous builds, pulls new clones, # +# builds, deploys, configures the tests and launches ATRT # +############################################################# + +############### +#Script setup # +############### + +save_args=$* +VERSION="autotest-run.sh version 1.00" + +DATE=`date '+%Y-%m-%d'` +HOST=`hostname -s` +export DATE HOST + +set -e +ulimit -Sc unlimited + +echo "`date` starting: $*" + +RSYNC_RSH=ssh +export RSYNC_RSH + +verbose=0 +report=yes +nolock= +RUN="daily-basic" +conf=autotest.conf +LOCK=$HOME/.autotest-lock + +############################ +# Read command line entries# +############################ + +while [ "$1" ] +do + case "$1" in + --verbose) verbose=`expr $verbose + 1`;; + --conf=*) conf=`echo $1 | sed s/--conf=//`;; + --version) echo $VERSION; exit;; + --suite=*) RUN=`echo $1 | sed s/--suite=//`;; + --install-dir=*) install_dir=`echo $1 | sed s/--install-dir=//`;; + --clone=*) clone=`echo $1 | sed s/--clone=//`;; + --nolock) nolock=true;; + esac + shift +done + +################################# +#Make sure the config file # +#exists; if it does not, exit; # +#if it does, source (.) it # +################################# + +install_dir_save=$install_dir +if [ -f $conf ] +then + . 
$conf +else + echo "Can't find config file: $conf" + exit +fi +install_dir=$install_dir_save + +############################### +# Validate that all interesting +# variables were set in conf +############################### +vars="target base_dir install_dir hosts" +if [ "$report" ] +then + vars="$vars result_host result_path" +fi +for i in $vars +do + t=`echo echo \\$$i` + if [ -z "`eval $t`" ] + then + echo "Invalid config: $conf, variable $i is not set" + exit + fi +done + +############################### +#Print out the environment # +#variables # +############################### + +if [ $verbose -gt 0 ] +then + env +fi + +####################################### +# Check to see if the lock file exists# +# If it does, exit. # +####################################### + +if [ -z "$nolock" ] +then + if [ -f $LOCK ] + then + echo "Lock file exists: $LOCK" + exit 1 + fi + echo "$DATE $RUN" > $LOCK +fi + +############################# +#If any errors occur from # +#here down, trap them, and # +#remove the lock file before# +#exit # +############################# +if [ `uname -s` != "SunOS" ] +then + trap "rm -f $LOCK" ERR +fi + + +############################################### +# Check that all interesting files are present# +############################################### + +test_dir=$install_dir/mysql-test/ndb +atrt=$test_dir/atrt +test_file=$test_dir/$RUN-tests.txt + +if [ ! -f "$test_file" ] +then + echo "Can't find test file: $test_file" + exit 1 +fi + +if [ ! -x "$atrt" ] +then + echo "Can't find atrt binary at $atrt" + exit 1 +fi + +############################ +# check ndb_cpcc fail hosts# +############################ +failed=`ndb_cpcc $hosts | awk '{ if($1=="Failed"){ print;}}'` +if [ "$failed" ] +then + echo "Can't contact cpcd on $failed, exiting" + exit 1 +fi + +############################# +# Function for replacing the# +# CHOOSE_host placeholders # +# with real host names. 
Note $$ = PID # +############################# +choose(){ + SRC=$1 + TMP1=/tmp/choose.$$ + TMP2=/tmp/choose.$$.$$ + shift + + cp $SRC $TMP1 + i=1 + while [ $# -gt 0 ] + do + sed -e s,"CHOOSE_host$i",$1,g < $TMP1 > $TMP2 + mv $TMP2 $TMP1 + shift + i=`expr $i + 1` + done + cat $TMP1 + rm -f $TMP1 +} + +choose_conf(){ + if [ -f $test_dir/conf-$1-$HOST.cnf ] + then + echo "$test_dir/conf-$1-$HOST.cnf" + elif [ -f $test_dir/conf-$1.cnf ] + then + echo "$test_dir/conf-$1.cnf" + elif [ -f $test_dir/conf-$HOST.cnf ] + then + echo "$test_dir/conf-$HOST.cnf" + else + echo "Unable to find conf file, looked for" 1>&2 + echo "$test_dir/conf-$1-$HOST.cnf and" 1>&2 + echo "$test_dir/conf-$HOST.cnf" 1>&2 + echo "$test_dir/conf-$1.cnf" 1>&2 + exit + fi +} + +######################################### +# Count how many computers we have ready# +######################################### + +count_hosts(){ + cnt=`grep "CHOOSE_host" $1 | awk '{for(i=1; i<=NF;i++) \ + if(index($i, "CHOOSE_host") > 0) print $i;}' | sort | uniq | wc -l` + echo $cnt +} + +conf=`choose_conf $RUN` +count=`count_hosts $conf` +avail=`echo $hosts | wc -w` +if [ $count -gt $avail ] + then + echo "Not enough hosts" + echo "Needs: $count available: $avail ($hosts)" + exit 1 +fi + +### +# Make directories needed + +p=`pwd` +run_dir=$install_dir/run-$RUN-$clone-$target +res_dir=$base_dir/result-$RUN-$clone-$target/$DATE +tar_dir=$base_dir/saved-results + +mkdir -p $run_dir $res_dir $tar_dir +rm -rf $res_dir/* $run_dir/* + + +### +# +# Do sed substitutions +# +cd $run_dir +choose $conf $hosts > d.tmp.$$ +sed -e s,CHOOSE_dir,"$run_dir/run",g < d.tmp.$$ > my.cnf + +# Setup configuration +$atrt Cdq my.cnf + +# Start... +$atrt --report-file=report.txt --log-file=log.txt --testcase-file=$test_dir/$RUN-tests.txt my.cnf + +# Make tar-ball +[ -f log.txt ] && mv log.txt $res_dir +[ -f report.txt ] && mv report.txt $res_dir +[ "`find . -name 'result*'`" ] && mv result* $res_dir +cd $res_dir + +echo "date=$DATE" > info.txt +echo "suite=$RUN" >> info.txt +echo "clone=$clone" >> info.txt +echo "arch=$target" >> info.txt +find . | xargs chmod ugo+r + +cd .. +p2=`pwd` +cd .. +tarfile=res.$RUN.$clone.$target.$DATE.$HOST.$$.tgz +tar cfz $tar_dir/$tarfile `basename $p2`/$DATE + +if [ "$report" ] +then + scp $tar_dir/$tarfile $result_host:$result_path/ +fi + +cd $p +rm -rf $res_dir $run_dir + +if [ -z "$nolock" ] +then + rm -f $LOCK +fi diff --git a/storage/ndb/test/run-test/conf-daily-basic-ndb08.txt b/storage/ndb/test/run-test/conf-daily-basic-ndb08.txt deleted file mode 100644 index bcd809593f3..00000000000 --- a/storage/ndb/test/run-test/conf-daily-basic-ndb08.txt +++ /dev/null @@ -1,19 +0,0 @@ -baseport: 14000 -basedir: /space/autotest -mgm: CHOOSE_host1 -ndb: CHOOSE_host2 CHOOSE_host3 CHOOSE_host2 CHOOSE_host3 -api: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 --- cluster config -[DB DEFAULT] -NoOfReplicas: 2 -IndexMemory: 100M -DataMemory: 300M -BackupMemory: 64M -MaxNoOfConcurrentScans: 100 -DataDir: . -FileSystemPath: /space/autotest/run - -[MGM DEFAULT] -PortNumber: 14000 -ArbitrationRank: 1 -DataDir: . 
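The choose() function above is the heart of the host mapping: it streams a template through sed once per host, rewriting each CHOOSE_hostN placeholder (as used in the conf templates nearby) into a real host name. For illustration only, and not part of this change, a minimal C++ sketch of the same substitution; the name choose_hosts and its signature are assumptions:

#include <cstdio>
#include <cstring>
#include <string>
#include <vector>

// Replace each CHOOSE_host<N> placeholder with the N:th host name,
// mirroring the sed loop in choose().  Assumes at most 9 hosts, so no
// placeholder is a prefix of another (CHOOSE_host1 vs CHOOSE_host10).
static std::string choose_hosts(std::string text,
                                const std::vector<std::string>& hosts)
{
  for (std::size_t i = 0; i < hosts.size(); i++)
  {
    char key[32];
    std::snprintf(key, sizeof(key), "CHOOSE_host%u", (unsigned)(i + 1));
    const std::size_t klen = std::strlen(key);
    for (std::string::size_type pos = text.find(key);
         pos != std::string::npos;
         pos = text.find(key, pos + hosts[i].size()))
      text.replace(pos, klen, hosts[i]);
  }
  return text;
}

// e.g. choose_hosts("ndbd = CHOOSE_host2,CHOOSE_host3", {"a", "b", "c"})
// yields "ndbd = b,c".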
diff --git a/storage/ndb/test/run-test/conf-daily-devel-ndb08.txt b/storage/ndb/test/run-test/conf-daily-devel-ndb08.txt deleted file mode 100644 index 8b340e6a39d..00000000000 --- a/storage/ndb/test/run-test/conf-daily-devel-ndb08.txt +++ /dev/null @@ -1,19 +0,0 @@ -baseport: 16000 -basedir: /space/autotest -mgm: CHOOSE_host1 -ndb: CHOOSE_host2 CHOOSE_host3 CHOOSE_host2 CHOOSE_host3 -api: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 --- cluster config -[DB DEFAULT] -NoOfReplicas: 2 -IndexMemory: 100M -DataMemory: 300M -BackupMemory: 64M -MaxNoOfConcurrentScans: 100 -DataDir: . -FileSystemPath: /space/autotest/run - -[MGM DEFAULT] -PortNumber: 16000 -ArbitrationRank: 1 -DataDir: . diff --git a/storage/ndb/test/run-test/conf-daily-devel-ndbmaster.txt b/storage/ndb/test/run-test/conf-daily-devel-ndbmaster.txt deleted file mode 100644 index 45e6e25f030..00000000000 --- a/storage/ndb/test/run-test/conf-daily-devel-ndbmaster.txt +++ /dev/null @@ -1,22 +0,0 @@ -baseport: 16000 -basedir: CHOOSE_dir -mgm: CHOOSE_host1 -ndb: CHOOSE_host2 CHOOSE_host3 CHOOSE_host2 CHOOSE_host3 -api: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 --- cluster config -[DB DEFAULT] -NoOfReplicas: 2 -IndexMemory: 100M -DataMemory: 300M -BackupMemory: 64M -MaxNoOfConcurrentScans: 100 -DataDir: . -FileSystemPath: CHOOSE_dir/run - -[MGM DEFAULT] -PortNumber: 16000 -ArbitrationRank: 1 -DataDir: . - -[TCP DEFAULT] -SendBufferMemory: 2M diff --git a/storage/ndb/test/run-test/conf-daily-sql-ndb08.txt b/storage/ndb/test/run-test/conf-daily-sql-ndb08.txt deleted file mode 100644 index 0d6a99f8d48..00000000000 --- a/storage/ndb/test/run-test/conf-daily-sql-ndb08.txt +++ /dev/null @@ -1,20 +0,0 @@ -baseport: 16000 -basedir: /space/autotest -mgm: CHOOSE_host1 -ndb: CHOOSE_host2 CHOOSE_host3 -mysqld: CHOOSE_host1 CHOOSE_host4 -mysql: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4 --- cluster config -[DB DEFAULT] -NoOfReplicas: 2 -IndexMemory: 100M -DataMemory: 300M -BackupMemory: 64M -MaxNoOfConcurrentScans: 100 -DataDir: . -FileSystemPath: /space/autotest/run - -[MGM DEFAULT] -PortNumber: 16000 -ArbitrationRank: 1 -DataDir: . diff --git a/storage/ndb/test/run-test/conf-daily-sql-ndbmaster.txt b/storage/ndb/test/run-test/conf-daily-sql-ndbmaster.txt deleted file mode 100644 index 0d6a99f8d48..00000000000 --- a/storage/ndb/test/run-test/conf-daily-sql-ndbmaster.txt +++ /dev/null @@ -1,20 +0,0 @@ -baseport: 16000 -basedir: /space/autotest -mgm: CHOOSE_host1 -ndb: CHOOSE_host2 CHOOSE_host3 -mysqld: CHOOSE_host1 CHOOSE_host4 -mysql: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4 --- cluster config -[DB DEFAULT] -NoOfReplicas: 2 -IndexMemory: 100M -DataMemory: 300M -BackupMemory: 64M -MaxNoOfConcurrentScans: 100 -DataDir: . -FileSystemPath: /space/autotest/run - -[MGM DEFAULT] -PortNumber: 16000 -ArbitrationRank: 1 -DataDir: . 
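count_hosts() above derives how many machines a template needs by counting its distinct CHOOSE_hostN tokens, and the result is checked against the hosts variable from autotest.conf. A minimal C++ sketch of that counting, assuming C++11 <regex>; count_placeholders is a hypothetical name, not part of atrt:

#include <cstddef>
#include <regex>
#include <set>
#include <string>

// Count distinct CHOOSE_host<N> placeholders, mirroring the
// grep | awk | sort | uniq | wc -l pipeline in count_hosts().
static std::size_t count_placeholders(const std::string& text)
{
  static const std::regex placeholder("CHOOSE_host[0-9]+");
  std::set<std::string> seen;
  for (std::sregex_iterator it(text.begin(), text.end(), placeholder), end;
       it != end; ++it)
    seen.insert(it->str());
  return seen.size();
}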
diff --git a/storage/ndb/test/run-test/conf-dl145a.cnf b/storage/ndb/test/run-test/conf-dl145a.cnf new file mode 100644 index 00000000000..5f61bee755d --- /dev/null +++ b/storage/ndb/test/run-test/conf-dl145a.cnf @@ -0,0 +1,26 @@ +[atrt] +basedir = CHOOSE_dir +baseport = 14000 +clusters = .2node + +[ndb_mgmd] + +[mysqld] +skip-innodb +skip-bdb + +[cluster_config.2node] +ndb_mgmd = CHOOSE_host1 +ndbd = CHOOSE_host2,CHOOSE_host3 +ndbapi= CHOOSE_host1,CHOOSE_host1,CHOOSE_host1 + +NoOfReplicas = 2 +IndexMemory = 100M +DataMemory = 300M +BackupMemory = 64M +MaxNoOfConcurrentScans = 100 +MaxNoOfSavedMessages= 1000 +SendBufferMemory = 2M +NoOfFragmentLogFiles = 4 +FragmentLogFileSize = 64M + diff --git a/storage/ndb/test/run-test/conf-dl145a.txt b/storage/ndb/test/run-test/conf-dl145a.txt deleted file mode 100644 index d0a240f09d1..00000000000 --- a/storage/ndb/test/run-test/conf-dl145a.txt +++ /dev/null @@ -1,22 +0,0 @@ -baseport: 14000 -basedir: /home/ndbdev/autotest/run -mgm: CHOOSE_host1 -ndb: CHOOSE_host2 CHOOSE_host3 -api: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 --- cluster config -[DB DEFAULT] -NoOfReplicas: 2 -IndexMemory: 100M -DataMemory: 300M -BackupMemory: 64M -MaxNoOfConcurrentScans: 100 -DataDir: . -FileSystemPath: /home/ndbdev/autotest/run - -[MGM DEFAULT] -PortNumber: 14000 -ArbitrationRank: 1 -DataDir: . - -[TCP DEFAULT] -SendBufferMemory: 2M diff --git a/storage/ndb/test/run-test/conf-ndbmaster.cnf b/storage/ndb/test/run-test/conf-ndbmaster.cnf new file mode 100644 index 00000000000..417e2988d0d --- /dev/null +++ b/storage/ndb/test/run-test/conf-ndbmaster.cnf @@ -0,0 +1,23 @@ +[atrt] +basedir = CHOOSE_dir +baseport = 14000 +clusters = .4node + +[ndb_mgmd] + +[mysqld] +skip-innodb +skip-bdb + +[cluster_config.4node] +ndb_mgmd = CHOOSE_host1 +ndbd = CHOOSE_host2,CHOOSE_host3,CHOOSE_host2,CHOOSE_host3 +ndbapi= CHOOSE_host1,CHOOSE_host1,CHOOSE_host1 + +NoOfReplicas = 2 +IndexMemory = 100M +DataMemory = 300M +BackupMemory = 64M +MaxNoOfConcurrentScans = 100 +MaxNoOfSavedMessages= 1000 +SendBufferMemory = 2M diff --git a/storage/ndb/test/run-test/conf-ndbmaster.txt b/storage/ndb/test/run-test/conf-ndbmaster.txt deleted file mode 100644 index 9f50432f5e3..00000000000 --- a/storage/ndb/test/run-test/conf-ndbmaster.txt +++ /dev/null @@ -1,22 +0,0 @@ -baseport: 14000 -basedir: CHOOSE_dir -mgm: CHOOSE_host1 -ndb: CHOOSE_host2 CHOOSE_host3 CHOOSE_host2 CHOOSE_host3 -api: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 --- cluster config -[DB DEFAULT] -NoOfReplicas: 2 -IndexMemory: 100M -DataMemory: 300M -BackupMemory: 64M -MaxNoOfConcurrentScans: 100 -DataDir: . -FileSystemPath: CHOOSE_dir/run - -[MGM DEFAULT] -PortNumber: 14000 -ArbitrationRank: 1 -DataDir: . 
- -[TCP DEFAULT] -SendBufferMemory: 2M diff --git a/storage/ndb/test/run-test/conf-repl.cnf b/storage/ndb/test/run-test/conf-repl.cnf new file mode 100644 index 00000000000..57eb2ee413e --- /dev/null +++ b/storage/ndb/test/run-test/conf-repl.cnf @@ -0,0 +1,28 @@ +[atrt] +basedir=CHOOSE_dir +baseport=15000 +clusters= .master,.slave +replicate= 1.master:1.slave + +[ndb_mgmd] + +[mysqld] +skip-innodb +skip-bdb + +[cluster_config] +MaxNoOfSavedMessages= 1000 +DataMemory = 100M + +[cluster_config.master] +NoOfReplicas = 2 +ndb_mgmd = CHOOSE_host1 +ndbd = CHOOSE_host2,CHOOSE_host3 +mysqld = CHOOSE_host1 +ndbapi= CHOOSE_host1 + +[cluster_config.slave] +NoOfReplicas = 1 +ndb_mgmd = CHOOSE_host4 +ndbd = CHOOSE_host4 +mysqld = CHOOSE_host4 diff --git a/storage/ndb/test/run-test/conf-shark.txt b/storage/ndb/test/run-test/conf-shark.txt deleted file mode 100644 index d66d0280d8a..00000000000 --- a/storage/ndb/test/run-test/conf-shark.txt +++ /dev/null @@ -1,22 +0,0 @@ -baseport: 14000 -basedir: /space/autotest -mgm: CHOOSE_host1 -ndb: CHOOSE_host1 CHOOSE_host1 -api: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 --- cluster config -[DB DEFAULT] -NoOfReplicas: 2 -IndexMemory: 100M -DataMemory: 300M -BackupMemory: 64M -MaxNoOfConcurrentScans: 100 -DataDir: . -FileSystemPath: /space/autotest/run - -[MGM DEFAULT] -PortNumber: 14000 -ArbitrationRank: 1 -DataDir: . - -[TCP DEFAULT] -SendBufferMemory: 2M diff --git a/storage/ndb/test/run-test/conf-test.cnf b/storage/ndb/test/run-test/conf-test.cnf new file mode 100644 index 00000000000..e528eeb1d8b --- /dev/null +++ b/storage/ndb/test/run-test/conf-test.cnf @@ -0,0 +1,26 @@ +[atrt] +basedir = CHOOSE_dir +baseport = 14000 +clusters = .2node + +[ndb_mgmd] + +[mysqld] +skip-innodb +skip-bdb + +[cluster_config.2node] +ndb_mgmd = CHOOSE_host1 +ndbd = CHOOSE_host2,CHOOSE_host3 +ndbapi= CHOOSE_host1,CHOOSE_host1,CHOOSE_host1 + +NoOfReplicas = 2 +IndexMemory = 25M +DataMemory = 100M +BackupMemory = 64M +MaxNoOfConcurrentScans = 100 +MaxNoOfSavedMessages= 1000 +SendBufferMemory = 2M +NoOfFragmentLogFiles = 4 +FragmentLogFileSize = 64M + diff --git a/storage/ndb/test/run-test/daily-basic-tests.txt b/storage/ndb/test/run-test/daily-basic-tests.txt index ef5082ca30c..845cd5c21bb 100644 --- a/storage/ndb/test/run-test/daily-basic-tests.txt +++ b/storage/ndb/test/run-test/daily-basic-tests.txt @@ -65,6 +65,14 @@ args: -n PkRead max-time: 500 cmd: testBasic +args: -n PkSimpleRead + +max-time: 500 +cmd: testBasic +args: -n PkDirtyRead + +max-time: 500 +cmd: testBasic args: -n PkUpdate max-time: 500 @@ -81,6 +89,10 @@ args: -n UpdateAndRead max-time: 500 cmd: testBasic +args: -n DeleteRead + +max-time: 500 +cmd: testBasic args: -n PkReadAndLocker T6 D1 D2 max-time: 500 @@ -474,20 +486,24 @@ cmd: testScan args: -n Bug24447 T1 max-time: 1000 -cmd: testNodeRestart -args: -n Bug27003 T1 +cmd: testScan +args: -n ScanVariants max-time: 1000 cmd: testNodeRestart -args: -n Bug27283 T1 +args: -n Bug27003 T1 + +max-time: 300 +cmd: testSystemRestart +args: -n Bug29167 T1 + +max-time: 300 +cmd: testSystemRestart +args: -l 2 -n Bug28770 T1 max-time: 1000 cmd: testNodeRestart -args: -n Bug28023 T7 D2 - -max-time: 500 -cmd: testScan -args: -n ScanVariants +args: -n Bug27283 T1 max-time: 500 cmd: testNodeRestart @@ -545,6 +561,10 @@ max-time: 1000 cmd: testNodeRestart args: -n Bug25554 T1 +max-time: 3000 +cmd: testNodeRestart +args: -n Bug25984 T1 + max-time: 1000 cmd: testNodeRestart args: -n Bug26457 T1 @@ -555,10 +575,22 @@ args: -n Bug26481 T1 max-time: 1000 cmd: testNodeRestart +args: -n 
Bug28023 T7 D2 + +max-time: 1000 +cmd: testNodeRestart args: -n Bug29364 T1 # # DICT TESTS +max-time: 500 +cmd: testDict +args: -n Bug29501 T1 + +max-time: 500 +cmd: testDict +args: -n testDropDDObjects T1 + max-time: 1500 cmd: testDict args: -n CreateAndDrop @@ -611,6 +643,18 @@ max-time: 1500 cmd: testDict args: -n Restart_NR2 T1 I3 +max-time: 500 +cmd: testDict +args: -n Bug21755 T1 + +max-time: 1500 +cmd: testDict +args: -l 25 -n DictRestart T1 + +max-time: 500 +cmd: testDict +args: -n Bug24631 T1 + # # TEST NDBAPI # @@ -716,6 +760,14 @@ max-time: 1500 cmd: testBlobs args: +max-time: 600 +cmd: testBlobs +args: -bug 27018 + +max-time: 600 +cmd: testBlobs +args: -bug 27370 + max-time: 5000 cmd: testOIBasic args: -case abcdefz @@ -870,6 +922,46 @@ cmd: DbAsyncGenerator args: -time 60 -p 1 -proc 25 type: bench +max-time: 120 +cmd: testMgm +args: -n ApiSessionFailure T1 + +max-time: 15 +cmd: testMgm +args: -n ApiConnectTimeout T1 + +max-time: 120 +cmd: testMgm +args: -n ApiTimeoutBasic T1 + +max-time: 120 +cmd: testMgm +args: -n ApiSessionFailure T1 + +max-time: 120 +cmd: testMgm +args: -n ApiGetStatusTimeout T1 + +max-time: 120 +cmd: testMgm +args: -n ApiGetConfigTimeout T1 + +max-time: 120 +cmd: testMgm +args: -n ApiMgmEventTimeout T1 + +max-time: 120 +cmd: testMgm +args: -n ApiMgmStructEventTimeout T1 + +max-time: 180 +cmd: testIndex +args: -n Bug28804 T1 T3 + +max-time: 180 +cmd: testIndex +args: -n Bug28804_ATTRINFO T1 T3 + max-time: 1500 cmd: testSystemRestart args: -n SR_DD_1 D1 @@ -942,3 +1034,4 @@ max-time: 300 cmd: test_event args: -n Bug31701 T1 + diff --git a/storage/ndb/test/run-test/example-my.cnf b/storage/ndb/test/run-test/example-my.cnf new file mode 100644 index 00000000000..99e1ce9f75b --- /dev/null +++ b/storage/ndb/test/run-test/example-my.cnf @@ -0,0 +1,116 @@ +[atrt] +basedir=/home/jonas/atrt +baseport=10000 +clusters = .master +clusters= .master,.slave +replicate = 1.master:1.slave +replicate = 2.master:2.slave + +[cluster_config] +NoOfReplicas= 2 +IndexMemory= 10M +DataMemory= 50M +MaxNoOfConcurrentScans= 100 +Diskless = 1 + +[cluster_config.master] +ndb_mgmd = local1 +ndbd = local1,local1 +mysqld = local1,local1 +ndbapi= local1 +NoOfReplicas= 2 + +[cluster_config.slave] +ndb_mgmd = local1 +ndbd = local1 +ndbapi= local1 +mysqld = local1,local1 +NoOfReplicas= 1 + +[mysqld] +skip-innodb +skip-bdb + +# +# Generated by atrt +# Mon May 29 23:27:49 2006 + +[mysql_cluster.master] +ndb-connectstring= local1:10000 + +[cluster_config.ndb_mgmd.1.master] +PortNumber= 10000 + +[cluster_config.ndbd.1.master] +FileSystemPath= /home/jonas/atrt/cluster.master/ndbd.1 + +[cluster_config.ndbd.2.master] +FileSystemPath= /home/jonas/atrt/cluster.master/ndbd.2 + +[mysqld.1.master] +datadir= /home/jonas/atrt/cluster.master/mysqld.1 +socket= /home/jonas/atrt/cluster.master/mysqld.1/mysql.sock +port= 10001 +server-id= 1 +log-bin +ndb-connectstring= local1:10000 +ndbcluster + +[client.1.master] +socket= /home/jonas/atrt/cluster.master/mysqld.1/mysql.sock +port= 10001 + +[mysqld.2.master] +datadir= /home/jonas/atrt/cluster.master/mysqld.2 +socket= /home/jonas/atrt/cluster.master/mysqld.2/mysql.sock +port= 10002 +server-id= 2 +log-bin +ndb-connectstring= local1:10000 +ndbcluster + +[client.2.master] +socket= /home/jonas/atrt/cluster.master/mysqld.2/mysql.sock +port= 10002 + +[mysql_cluster.slave] +ndb-connectstring= local1:10003 + +[cluster_config.ndb_mgmd.1.slave] +PortNumber= 10003 + +[cluster_config.ndbd.1.slave] +FileSystemPath= /home/jonas/atrt/cluster.slave/ndbd.1 + +[mysqld.1.slave] 
+datadir= /home/jonas/atrt/cluster.slave/mysqld.1 +socket= /home/jonas/atrt/cluster.slave/mysqld.1/mysql.sock +port= 10004 +server-id= 3 +master-host= local1 +master-port= 10001 +master-user= root +master-password= "" +ndb-connectstring= local1:10003 +ndbcluster + +[client.1.slave] +socket= /home/jonas/atrt/cluster.slave/mysqld.1/mysql.sock +port= 10004 + +[mysqld.2.slave] +datadir= /home/jonas/atrt/cluster.slave/mysqld.2 +socket= /home/jonas/atrt/cluster.slave/mysqld.2/mysql.sock +port= 10005 +server-id= 4 +master-host= local1 +master-port= 10002 +master-user= root +master-password= "" +ndb-connectstring= local1:10003 +ndbcluster + +[client.2.slave] +socket= /home/jonas/atrt/cluster.slave/mysqld.2/mysql.sock +port= 10005 + diff --git a/storage/ndb/test/run-test/files.cpp b/storage/ndb/test/run-test/files.cpp new file mode 100644 index 00000000000..c6a29350b91 --- /dev/null +++ b/storage/ndb/test/run-test/files.cpp @@ -0,0 +1,384 @@ +#include "atrt.hpp" +#include <sys/types.h> +#include <dirent.h> + +static bool create_directory(const char * path); + +bool +setup_directories(atrt_config& config, int setup) +{ + /** + * 0 = validate + * 1 = setup + * 2 = setup+clean + */ + for (size_t i = 0; i < config.m_clusters.size(); i++) + { + atrt_cluster& cluster = *config.m_clusters[i]; + for (size_t j = 0; j<cluster.m_processes.size(); j++) + { + atrt_process& proc = *cluster.m_processes[j]; + const char * dir = proc.m_proc.m_cwd.c_str(); + struct stat sbuf; + int exists = 0; + if (lstat(dir, &sbuf) == 0) + { + if (S_ISDIR(sbuf.st_mode)) + exists = 1; + else + exists = -1; + } + + switch(setup){ + case 0: + switch(exists){ + case 0: + g_logger.error("Could not find directory: %s", dir); + return false; + case -1: + g_logger.error("%s is not a directory!", dir); + return false; + } + break; + case 1: + if (exists == -1) + { + g_logger.error("%s is not a directory!", dir); + return false; + } + break; + case 2: + if (exists == 1) + { + if (!remove_dir(dir)) + { + g_logger.error("Failed to remove %s!", dir); + return false; + } + exists = 0; + break; + } + else if (exists == -1) + { + if (!unlink(dir)) + { + g_logger.error("Failed to remove %s!", dir); + return false; + } + exists = 0; + } + } + if (exists != 1) + { + if (!create_directory(dir)) + { + return false; + } + } + } + } + return true; +} + +static +void +printfile(FILE* out, Properties& props, const char * section, ...) 
+{ + Properties::Iterator it (&props); + const char * name = it.first(); + if (name) + { + va_list ap; + va_start(ap, section); + /* const int ret = */ vfprintf(out, section, ap); + va_end(ap); + fprintf(out, "\n"); + + for (; name; name = it.next()) + { + const char* val; + props.get(name, &val); + fprintf(out, "%s %s\n", name + 2, val); + } + fprintf(out, "\n"); + } + fflush(out); +} + +bool +setup_files(atrt_config& config, int setup, int sshx) +{ + /** + * 0 = validate + * 1 = setup + * 2 = setup+clean + */ + BaseString mycnf; + mycnf.assfmt("%s/my.cnf", g_basedir); + + if (mycnf != g_my_cnf) + { + struct stat sbuf; + int ret = lstat(mycnf.c_str(), &sbuf); + + if (ret == 0) + { + if (unlink(mycnf.c_str()) != 0) + { + g_logger.error("Failed to remove %s", mycnf.c_str()); + return false; + } + } + + BaseString cp = "cp "; + cp.appfmt("%s %s", g_my_cnf, mycnf.c_str()); + if (system(cp.c_str()) != 0) + { + g_logger.error("Failed to '%s'", cp.c_str()); + return false; + } + } + + if (setup == 2 || config.m_generated) + { + /** + * Do mysql_install_db + */ + for (size_t i = 0; i < config.m_clusters.size(); i++) + { + atrt_cluster& cluster = *config.m_clusters[i]; + for (size_t j = 0; j<cluster.m_processes.size(); j++) + { + atrt_process& proc = *cluster.m_processes[j]; + if (proc.m_type == atrt_process::AP_MYSQLD) + { + const char * val; + require(proc.m_options.m_loaded.get("--datadir=", &val)); + BaseString tmp; + tmp.assfmt("%s/bin/mysql_install_db --defaults-file=%s/my.cnf --datadir=%s > /dev/null 2>&1", + g_prefix, g_basedir, val); + if (system(tmp.c_str()) != 0) + { + g_logger.error("Failed to mysql_install_db for %s, cmd: >%s<", + proc.m_proc.m_cwd.c_str(), + tmp.c_str()); + } + else + { + g_logger.info("mysql_install_db for %s", + proc.m_proc.m_cwd.c_str()); + } + } + } + } + } + + FILE * out = NULL; + if (config.m_generated == false) + { + g_logger.info("Nothing configured..."); + } + else + { + out = fopen(mycnf.c_str(), "a+"); + if (out == 0) + { + g_logger.error("Failed to open %s for append", mycnf.c_str()); + return false; + } + time_t now = time(0); + fprintf(out, "#\n# Generated by atrt\n"); + fprintf(out, "# %s\n", ctime(&now)); + } + + for (size_t i = 0; i < config.m_clusters.size(); i++) + { + atrt_cluster& cluster = *config.m_clusters[i]; + if (out) + { + Properties::Iterator it(&cluster.m_options.m_generated); + printfile(out, cluster.m_options.m_generated, + "[mysql_cluster%s]", cluster.m_name.c_str()); + } + + for (size_t j = 0; j<cluster.m_processes.size(); j++) + { + atrt_process& proc = *cluster.m_processes[j]; + + if (out) + { + switch(proc.m_type){ + case atrt_process::AP_NDB_MGMD: + printfile(out, proc.m_options.m_generated, + "[cluster_config.ndb_mgmd.%d%s]", + proc.m_index, proc.m_cluster->m_name.c_str()); + break; + case atrt_process::AP_NDBD: + printfile(out, proc.m_options.m_generated, + "[cluster_config.ndbd.%d%s]", + proc.m_index, proc.m_cluster->m_name.c_str()); + break; + case atrt_process::AP_MYSQLD: + printfile(out, proc.m_options.m_generated, + "[mysqld.%d%s]", + proc.m_index, proc.m_cluster->m_name.c_str()); + break; + case atrt_process::AP_NDB_API: + break; + case atrt_process::AP_CLIENT: + printfile(out, proc.m_options.m_generated, + "[client.%d%s]", + proc.m_index, proc.m_cluster->m_name.c_str()); + break; + case atrt_process::AP_ALL: + case atrt_process::AP_CLUSTER: + abort(); + } + } + + /** + * Create env.sh + */ + BaseString tmp; + tmp.assfmt("%s/env.sh", proc.m_proc.m_cwd.c_str()); + char **env = BaseString::argify(0, 
proc.m_proc.m_env.c_str()); + if (env[0]) + { + Vector<BaseString> keys; + FILE *fenv = fopen(tmp.c_str(), "w+"); + if (fenv == 0) + { + g_logger.error("Failed to open %s for writing", tmp.c_str()); + return false; + } + for (size_t k = 0; env[k]; k++) + { + tmp = env[k]; + int pos = tmp.indexOf('='); + require(pos > 0); + env[k][pos] = 0; + fprintf(fenv, "%s=\"%s\"\n", env[k], env[k]+pos+1); + keys.push_back(env[k]); + free(env[k]); + } + fprintf(fenv, "PATH=%s/bin:%s/libexec:$PATH\n", g_prefix, g_prefix); + keys.push_back("PATH"); + for (size_t k = 0; k<keys.size(); k++) + fprintf(fenv, "export %s\n", keys[k].c_str()); + fflush(fenv); + fclose(fenv); + } + free(env); + + tmp.assfmt("%s/ssh-login.sh", proc.m_proc.m_cwd.c_str()); + FILE* fenv = fopen(tmp.c_str(), "w+"); + if (fenv == 0) + { + g_logger.error("Failed to open %s for writing", tmp.c_str()); + return false; + } + fprintf(fenv, "#!/bin/sh\n"); + fprintf(fenv, "cd %s\n", proc.m_proc.m_cwd.c_str()); + fprintf(fenv, "[ -f /etc/profile ] && . /etc/profile\n"); + fprintf(fenv, ". env.sh\n"); + fprintf(fenv, "ulimit -Sc unlimited\n"); + fprintf(fenv, "bash -i"); + fflush(fenv); + fclose(fenv); + } + } + + if (out) + { + fflush(out); + fclose(out); + } + + return true; +} + +static +bool +create_directory(const char * path) +{ + BaseString tmp(path); + Vector<BaseString> list; + if (tmp.split(list, "/") == 0) + { + g_logger.error("Failed to create directory: %s", tmp.c_str()); + return false; + } + + BaseString cwd = "/"; + for (size_t i = 0; i < list.size(); i++) + { + cwd.append(list[i].c_str()); + cwd.append("/"); + mkdir(cwd.c_str(), S_IRUSR | S_IWUSR | S_IXUSR | S_IXGRP | S_IRGRP); + } + + struct stat sbuf; + if (lstat(path, &sbuf) != 0 || + !S_ISDIR(sbuf.st_mode)) + { + g_logger.error("Failed to create directory: %s (%s)", + tmp.c_str(), + cwd.c_str()); + return false; + } + + return true; +} + +bool +remove_dir(const char * path, bool inclusive) +{ + DIR* dirp = opendir(path); + + if (dirp == 0) + { + if(errno != ENOENT) + { + g_logger.error("Failed to remove >%s< errno: %d %s", + path, errno, strerror(errno)); + return false; + } + return true; + } + + struct dirent * dp; + BaseString name = path; + name.append("/"); + while ((dp = readdir(dirp)) != NULL) + { + if ((strcmp(".", dp->d_name) != 0) && (strcmp("..", dp->d_name) != 0)) + { + BaseString tmp = name; + tmp.append(dp->d_name); + + if (remove(tmp.c_str()) == 0) + { + continue; + } + + if (!remove_dir(tmp.c_str())) + { + closedir(dirp); + return false; + } + } + } + + closedir(dirp); + if (inclusive) + { + if (rmdir(path) != 0) + { + g_logger.error("Failed to remove >%s< errno: %d %s", + path, errno, strerror(errno)); + return false; + } + } + return true; +} + diff --git a/storage/ndb/test/run-test/main.cpp b/storage/ndb/test/run-test/main.cpp index aef041d24d6..b5c4385f5d3 100644 --- a/storage/ndb/test/run-test/main.cpp +++ b/storage/ndb/test/run-test/main.cpp @@ -14,20 +14,19 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -#include <ndb_global.h> -#include <getarg.h> -#include <BaseString.hpp> -#include <Parser.hpp> +#include "atrt.hpp" +#include <my_sys.h> +#include <my_getopt.h> + #include <NdbOut.hpp> -#include <Properties.hpp> #include <NdbAutoPtr.hpp> -#include "run-test.hpp" #include <SysLogHandler.hpp> #include <FileLogHandler.hpp> -#include <mgmapi.h> -#include "CpcClient.hpp" +#include <NdbSleep.h> + +#define PATH_SEPARATOR "/" /** Global variables */ static const char progname[] = "ndb_atrt"; @@ -36,76 +35,198 @@ static const 
char * g_analyze_progname = "atrt-analyze-result.sh"; static const char * g_clear_progname = "atrt-clear-result.sh"; static const char * g_setup_progname = "atrt-setup.sh"; -static const char * g_setup_path = 0; -static const char * g_process_config_filename = "d.txt"; static const char * g_log_filename = 0; static const char * g_test_case_filename = 0; static const char * g_report_filename = 0; -static const char * g_default_user = 0; -static const char * g_default_base_dir = 0; -static int g_default_base_port = 0; -static int g_mysqld_use_base = 1; +static int g_do_setup = 0; +static int g_do_deploy = 0; +static int g_do_sshx = 0; +static int g_do_start = 0; +static int g_do_quit = 0; -static int g_report = 0; -static int g_verbosity = 0; +static int g_help = 0; +static int g_verbosity = 1; static FILE * g_report_file = 0; static FILE * g_test_case_file = stdin; +static int g_mode = 0; Logger g_logger; atrt_config g_config; - -static int g_mode_bench = 0; -static int g_mode_regression = 0; -static int g_mode_interactive = 0; -static int g_mode = 0; - -static -struct getargs args[] = { - { "process-config", 0, arg_string, &g_process_config_filename, 0, 0 }, - { "setup-path", 0, arg_string, &g_setup_path, 0, 0 }, - { 0, 'v', arg_counter, &g_verbosity, 0, 0 }, - { "log-file", 0, arg_string, &g_log_filename, 0, 0 }, - { "testcase-file", 'f', arg_string, &g_test_case_filename, 0, 0 }, - { 0, 'R', arg_flag, &g_report, 0, 0 }, - { "report-file", 0, arg_string, &g_report_filename, 0, 0 }, - { "interactive", 'i', arg_flag, &g_mode_interactive, 0, 0 }, - { "regression", 'r', arg_flag, &g_mode_regression, 0, 0 }, - { "bench", 'b', arg_flag, &g_mode_bench, 0, 0 }, +const char * g_user = 0; +int g_baseport = 10000; +int g_fqpn = 0; +int g_default_ports = 0; + +const char * g_cwd = 0; +const char * g_basedir = 0; +const char * g_my_cnf = 0; +const char * g_prefix = 0; +const char * g_clusters = 0; +BaseString g_replicate; +const char *save_file = 0; +char *save_extra_file = 0; +const char *save_group_suffix = 0; +const char * g_dummy; +char * g_env_path = 0; + +/** Dummy, extern declared in ndb_opts.h */ +int g_print_full_config = 0, opt_ndb_shm; +my_bool opt_core; + +static struct my_option g_options[] = +{ + { "help", '?', "Display this help and exit.", + (uchar **) &g_help, (uchar **) &g_help, + 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, + { "version", 'V', "Output version information and exit.", 0, 0, 0, + GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0 }, + { "clusters", 256, "Cluster", + (uchar **) &g_clusters, (uchar **) &g_clusters, + 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + { "replicate", 1024, "replicate", + (uchar **) &g_dummy, (uchar **) &g_dummy, + 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + { "log-file", 256, "log-file", + (uchar **) &g_log_filename, (uchar **) &g_log_filename, + 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + { "testcase-file", 'f', "testcase-file", + (uchar **) &g_test_case_filename, (uchar **) &g_test_case_filename, + 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + { "report-file", 'r', "report-file", + (uchar **) &g_report_filename, (uchar **) &g_report_filename, + 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + { "basedir", 256, "Base path", + (uchar **) &g_basedir, (uchar **) &g_basedir, + 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + { "baseport", 256, "Base port", + (uchar **) &g_baseport, (uchar **) &g_baseport, + 0, GET_INT, REQUIRED_ARG, g_baseport, 0, 0, 0, 0, 0}, + { "prefix", 256, "mysql install dir", + (uchar **) &g_prefix, (uchar **) &g_prefix, + 0, GET_STR, 
REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + { "verbose", 'v', "Verbosity", + (uchar **) &g_verbosity, (uchar **) &g_verbosity, + 0, GET_INT, REQUIRED_ARG, g_verbosity, 0, 0, 0, 0, 0}, + { "configure", 256, "configure", + (uchar **) &g_do_setup, (uchar **) &g_do_setup, + 0, GET_INT, REQUIRED_ARG, g_do_setup, 0, 0, 0, 0, 0 }, + { "deploy", 256, "deploy", + (uchar **) &g_do_deploy, (uchar **) &g_do_deploy, + 0, GET_INT, REQUIRED_ARG, g_do_deploy, 0, 0, 0, 0, 0 }, + { "sshx", 256, "sshx", + (uchar **) &g_do_sshx, (uchar **) &g_do_sshx, + 0, GET_INT, REQUIRED_ARG, g_do_sshx, 0, 0, 0, 0, 0 }, + { "start", 256, "start", + (uchar **) &g_do_start, (uchar **) &g_do_start, + 0, GET_INT, REQUIRED_ARG, g_do_start, 0, 0, 0, 0, 0 }, + { "fqpn", 256, "Fully qualified path-names ", + (uchar **) &g_fqpn, (uchar **) &g_fqpn, + 0, GET_INT, REQUIRED_ARG, g_fqpn, 0, 0, 0, 0, 0 }, + { "default-ports", 256, "Use default ports when possible", + (uchar **) &g_default_ports, (uchar **) &g_default_ports, + 0, GET_INT, REQUIRED_ARG, g_default_ports, 0, 0, 0, 0, 0 }, + { "mode", 256, "Mode 0=interactive 1=regression 2=bench", + (uchar **) &g_mode, (uchar **) &g_mode, + 0, GET_INT, REQUIRED_ARG, g_mode, 0, 0, 0, 0, 0 }, + { "quit", 256, "Quit before starting tests", + (uchar **) &g_mode, (uchar **) &g_do_quit, + 0, GET_BOOL, NO_ARG, g_do_quit, 0, 0, 0, 0, 0 }, + { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} }; -const int arg_count = 10; +const int p_ndb = atrt_process::AP_NDB_MGMD | atrt_process::AP_NDBD; +const int p_servers = atrt_process::AP_MYSQLD; +const int p_clients = atrt_process::AP_CLIENT | atrt_process::AP_NDB_API; int -main(int argc, const char ** argv){ +main(int argc, char ** argv) +{ ndb_init(); bool restart = true; int lineno = 1; int test_no = 1; + int return_code = 1; - const int p_ndb = atrt_process::NDB_MGM | atrt_process::NDB_DB; - const int p_servers = atrt_process::MYSQL_SERVER | atrt_process::NDB_REP; - const int p_clients = atrt_process::MYSQL_CLIENT | atrt_process::NDB_API; - g_logger.setCategory(progname); g_logger.enable(Logger::LL_ALL); g_logger.createConsoleHandler(); if(!parse_args(argc, argv)) goto end; - + g_logger.info("Starting..."); - if(!setup_config(g_config)) + g_config.m_generated = false; + g_config.m_replication = g_replicate; + if (!setup_config(g_config)) + goto end; + + if (!configure(g_config, g_do_setup)) goto end; + g_logger.info("Setting up directories"); + if (!setup_directories(g_config, g_do_setup)) + goto end; + + if (g_do_setup) + { + g_logger.info("Setting up files"); + if (!setup_files(g_config, g_do_setup, g_do_sshx)) + goto end; + } + + if (g_do_deploy) + { + if (!deploy(g_config)) + goto end; + } + + if (g_do_quit) + { + return_code = 0; + goto end; + } + + if(!setup_hosts(g_config)) + goto end; + + if (g_do_sshx) + { + g_logger.info("Starting xterm-ssh"); + if (!sshx(g_config, g_do_sshx)) + goto end; + + g_logger.info("Done...sleeping"); + while(true) + { + NdbSleep_SecSleep(1); + } + return_code = 0; + goto end; + } + g_logger.info("Connecting to hosts"); if(!connect_hosts(g_config)) goto end; - if(!setup_hosts(g_config)) + if (g_do_start && !g_test_case_filename) + { + g_logger.info("Starting server processes: %x", g_do_start); + if (!start(g_config, g_do_start)) + goto end; + + g_logger.info("Done...sleeping"); + while(true) + { + NdbSleep_SecSleep(1); + } + return_code = 0; goto end; + } + return_code = 0; + /** * Main loop */ @@ -114,37 +235,25 @@ main(int argc, const char ** argv){ * Do we need to restart ndb */ if(restart){ - 
g_logger.info("(Re)starting ndb processes"); + g_logger.info("(Re)starting server processes"); if(!stop_processes(g_config, ~0)) goto end; - if(!start_processes(g_config, atrt_process::NDB_MGM)) + if (!setup_directories(g_config, 2)) goto end; - if(!connect_ndb_mgm(g_config)){ - goto end; - } - - if(!start_processes(g_config, atrt_process::NDB_DB)) + if (!setup_files(g_config, 2, 1)) goto end; - if(!wait_ndb(g_config, NDB_MGM_NODE_STATUS_NOT_STARTED)) + if(!setup_hosts(g_config)) goto end; - for(Uint32 i = 0; i<3; i++) - if(wait_ndb(g_config, NDB_MGM_NODE_STATUS_STARTED)) - goto started; - - goto end; - - started: - if(!start_processes(g_config, p_servers)) - goto end; - - g_logger.info("Ndb start completed"); + if (!start(g_config, p_ndb | p_servers)) + goto end; + g_logger.info("All servers start completed"); } - const int start_line = lineno; + // const int start_line = lineno; atrt_testcase test_case; if(!read_test_case(g_test_case_file, test_case, lineno)) goto end; @@ -165,7 +274,7 @@ main(int argc, const char ** argv){ const time_t start = time(0); time_t now = start; do { - if(!update_status(g_config, atrt_process::ALL)) + if(!update_status(g_config, atrt_process::AP_ALL)) goto end; int count = 0; @@ -189,7 +298,7 @@ main(int argc, const char ** argv){ result = ERR_MAX_TIME_ELAPSED; break; } - sleep(1); + NdbSleep_SecSleep(1); } while(true); const time_t elapsed = time(0) - start; @@ -197,7 +306,8 @@ main(int argc, const char ** argv){ if(!stop_processes(g_config, p_clients)) goto end; - if(!gather_result(g_config, &result)) + int tmp, *rp = result ? &tmp : &result; + if(!gather_result(g_config, rp)) goto end; g_logger.info("#%d %s(%d)", @@ -205,29 +315,35 @@ main(int argc, const char ** argv){ (result == 0 ? "OK" : "FAILED"), result); if(g_report_file != 0){ - fprintf(g_report_file, "%s %s ; %d ; %d ; %ld\n", - test_case.m_command.c_str(), - test_case.m_args.c_str(), - test_no, result, elapsed); + fprintf(g_report_file, "%s ; %d ; %d ; %ld\n", + test_case.m_name.c_str(), test_no, result, elapsed); fflush(g_report_file); } - if(test_case.m_report || g_mode_bench || (g_mode_regression && result)){ - BaseString tmp; - tmp.assfmt("result.%d", test_no); - if(rename("result", tmp.c_str()) != 0){ - g_logger.critical("Failed to rename %s as %s", - "result", tmp.c_str()); - goto end; - } - } - - if(g_mode_interactive && result){ + if(g_mode == 0 && result){ g_logger.info ("Encountered failed test in interactive mode - terminating"); break; } + BaseString resdir; + resdir.assfmt("result.%d", test_no); + remove_dir(resdir.c_str(), true); + + if(test_case.m_report || g_mode == 2 || (g_mode && result)) + { + if(rename("result", resdir.c_str()) != 0) + { + g_logger.critical("Failed to rename %s as %s", + "result", resdir.c_str()); + goto end; + } + } + else + { + remove_dir("result", true); + } + if(result != 0){ restart = true; } else { @@ -247,276 +363,254 @@ main(int argc, const char ** argv){ g_test_case_file = 0; } - stop_processes(g_config, atrt_process::ALL); + stop_processes(g_config, atrt_process::AP_ALL); + return return_code; +} + +static +my_bool +get_one_option(int arg, const struct my_option * opt, char * value) +{ + if (arg == 1024) + { + if (g_replicate.length()) + g_replicate.append(";"); + g_replicate.append(value); + return 1; + } return 0; } bool -parse_args(int argc, const char** argv){ - int optind = 0; - if(getarg(args, arg_count, argc, argv, &optind)) { - arg_printusage(args, arg_count, progname, ""); +parse_args(int argc, char** argv) +{ + char buf[2048]; + if 
(getcwd(buf, sizeof(buf)) == 0) + { + g_logger.error("Unable to get current working directory"); return false; } - - if(g_log_filename != 0){ - g_logger.removeConsoleHandler(); - g_logger.addHandler(new FileLogHandler(g_log_filename)); + g_cwd = strdup(buf); + + struct stat sbuf; + BaseString mycnf; + if (argc > 1 && lstat(argv[argc-1], &sbuf) == 0) + { + mycnf.append(g_cwd); + mycnf.append(PATH_SEPARATOR); + mycnf.append(argv[argc-1]); } - + else { - int tmp = Logger::LL_WARNING - g_verbosity; - tmp = (tmp < Logger::LL_DEBUG ? Logger::LL_DEBUG : tmp); - g_logger.disable(Logger::LL_ALL); - g_logger.enable(Logger::LL_ON); - g_logger.enable((Logger::LoggerLevel)tmp, Logger::LL_ALERT); + mycnf.append(g_cwd); + mycnf.append(PATH_SEPARATOR); + mycnf.append("my.cnf"); + if (lstat(mycnf.c_str(), &sbuf) != 0) + { + g_logger.error("Unable to stat %s", mycnf.c_str()); + return false; + } } + g_logger.info("Bootstrapping using %s", mycnf.c_str()); + + const char *groups[] = { "atrt", 0 }; + int ret = load_defaults(mycnf.c_str(), groups, &argc, &argv); + + save_file = my_defaults_file; + save_extra_file = my_defaults_extra_file; + save_group_suffix = my_defaults_group_suffix; - - if(!g_process_config_filename){ - g_logger.critical("Process config not specified!"); + if (save_extra_file) + { + g_logger.error("--defaults-extra-file(%s) is not supported...", + save_extra_file); return false; } - if(!g_setup_path){ - char buf[1024]; - if(getcwd(buf, sizeof(buf))){ - g_setup_path = strdup(buf); - g_logger.info("Setup path not specified, using %s", buf); - } else { - g_logger.critical("Setup path not specified!\n"); - return false; - } - } - - if(g_report & !g_report_filename){ - g_report_filename = "report.txt"; + if (ret || handle_options(&argc, &argv, g_options, get_one_option)) + { + g_logger.error("Failed to load defaults/handle_options"); + return false; } - if(g_report_filename){ - g_report_file = fopen(g_report_filename, "w"); - if(g_report_file == 0){ - g_logger.critical("Unable to create report file: %s", g_report_filename); - return false; + if (argc >= 2) + { + const char * arg = argv[argc-2]; + while(* arg) + { + switch(* arg){ + case 'c': + g_do_setup = (g_do_setup == 0) ? 1 : g_do_setup; + break; + case 'C': + g_do_setup = 2; + break; + case 'd': + g_do_deploy = 1; + break; + case 'x': + g_do_sshx = atrt_process::AP_CLIENT | atrt_process::AP_NDB_API; + break; + case 'X': + g_do_sshx = atrt_process::AP_ALL; + break; + case 's': + g_do_start = p_ndb; + break; + case 'S': + g_do_start = p_ndb | p_servers; + break; + case 'f': + g_fqpn = 1; + break; + case 'q': + g_do_quit = 1; + break; + default: + g_logger.error("Unknown switch '%c'", *arg); + return false; + } + arg++; } } - if(g_test_case_filename){ - g_test_case_file = fopen(g_test_case_filename, "r"); - if(g_test_case_file == 0){ - g_logger.critical("Unable to open file: %s", g_test_case_filename); - return false; - } + if(g_log_filename != 0) + { + g_logger.removeConsoleHandler(); + g_logger.addHandler(new FileLogHandler(g_log_filename)); } - int sum = g_mode_interactive + g_mode_regression + g_mode_bench; - if(sum == 0){ - g_mode_interactive = 1; - } - - if(sum > 1){ - g_logger.critical - ("Only one of bench/regression/interactive can be specified"); - return false; + { + int tmp = Logger::LL_WARNING - g_verbosity; + tmp = (tmp < Logger::LL_DEBUG ? 
Logger::LL_DEBUG : tmp); + g_logger.disable(Logger::LL_ALL); + g_logger.enable(Logger::LL_ON); + g_logger.enable((Logger::LoggerLevel)tmp, Logger::LL_ALERT); } - g_default_user = strdup(getenv("LOGNAME")); - - return true; -} - - -static -atrt_host * -find(const BaseString& host, Vector<atrt_host> & hosts){ - for(size_t i = 0; i<hosts.size(); i++){ - if(hosts[i].m_hostname == host){ - return &hosts[i]; - } + if(!g_basedir) + { + g_basedir = g_cwd; + g_logger.info("basedir not specified, using %s", g_basedir); } - return 0; -} -bool -setup_config(atrt_config& config){ - - FILE * f = fopen(g_process_config_filename, "r"); - if(!f){ - g_logger.critical("Failed to open process config file: %s", - g_process_config_filename); - return false; + if (!g_prefix) + { + g_prefix = DEFAULT_PREFIX; } - bool result = true; - - int lineno = 0; - char buf[2048]; - BaseString connect_string; - int mysql_port_offset = 0; - while(fgets(buf, 2048, f)){ - lineno++; - - BaseString tmp(buf); - tmp.trim(" \t\n\r"); - - if(tmp.length() == 0 || tmp == "" || tmp.c_str()[0] == '#') - continue; - - Vector<BaseString> split1; - if(tmp.split(split1, ":", 2) != 2){ - g_logger.warning("Invalid line %d in %s - ignoring", - lineno, g_process_config_filename); - continue; + + /** + * Add path to atrt-*.sh + */ + { + BaseString tmp; + const char* env = getenv("PATH"); + if (env && strlen(env)) + { + tmp.assfmt("PATH=%s:%s/mysql-test/ndb", + env, g_prefix); } - - if(split1[0].trim() == "basedir"){ - g_default_base_dir = strdup(split1[1].trim().c_str()); - continue; + else + { + tmp.assfmt("PATH=%s/mysql-test/ndb", g_prefix); } + g_env_path = strdup(tmp.c_str()); + putenv(g_env_path); + } + + if (g_help) + { + my_print_help(g_options); + my_print_variables(g_options); + return 0; + } - if(split1[0].trim() == "baseport"){ - g_default_base_port = atoi(split1[1].trim().c_str()); - continue; + if(g_test_case_filename) + { + g_test_case_file = fopen(g_test_case_filename, "r"); + if(g_test_case_file == 0) + { + g_logger.critical("Unable to open file: %s", g_test_case_filename); + return false; } + if (g_do_setup == 0) + g_do_setup = 2; + + if (g_do_start == 0) + g_do_start = p_ndb | p_servers; + + if (g_mode == 0) + g_mode = 1; - if(split1[0].trim() == "user"){ - g_default_user = strdup(split1[1].trim().c_str()); - continue; + if (g_do_sshx) + { + g_logger.critical("ssx specified...not possible with testfile"); + return false; } - - if(split1[0].trim() == "mysqld-use-base" && split1[1].trim() == "no"){ - g_mysqld_use_base = 0; - continue; + } + + if (g_do_setup == 0) + { + BaseString tmp; + tmp.append(g_basedir); + tmp.append(PATH_SEPARATOR); + tmp.append("my.cnf"); + if (lstat(tmp.c_str(), &sbuf) != 0) + { + g_logger.error("Unable to stat %s", tmp.c_str()); + return false; } - Vector<BaseString> hosts; - if(split1[1].trim().split(hosts) <= 0){ - g_logger.warning("Invalid line %d in %s - ignoring", - lineno, g_process_config_filename); + if (!S_ISREG(sbuf.st_mode)) + { + g_logger.error("%s is not a regular file", tmp.c_str()); + return false; } - // 1 - Check hosts - for(size_t i = 0; i<hosts.size(); i++){ - Vector<BaseString> tmp; - hosts[i].split(tmp, ":"); - BaseString hostname = tmp[0].trim(); - BaseString base_dir; - if(tmp.size() >= 2) - base_dir = tmp[1]; - else if(g_default_base_dir == 0){ - g_logger.critical("Basedir not specified..."); - return false; - } - - atrt_host * host_ptr; - if((host_ptr = find(hostname, config.m_hosts)) == 0){ - atrt_host host; - host.m_index = config.m_hosts.size(); - host.m_cpcd = new 
SimpleCpcClient(hostname.c_str(), 1234); - host.m_base_dir = (base_dir.empty() ? g_default_base_dir : base_dir); - host.m_user = g_default_user; - host.m_hostname = hostname.c_str(); - config.m_hosts.push_back(host); - } else { - if(!base_dir.empty() && (base_dir == host_ptr->m_base_dir)){ - g_logger.critical("Inconsistent base dir definition for host %s" - ", \"%s\" != \"%s\"", hostname.c_str(), - base_dir.c_str(), host_ptr->m_base_dir.c_str()); - return false; - } - } - } - - for(size_t i = 0; i<hosts.size(); i++){ - BaseString & tmp = hosts[i]; - atrt_host * host = find(tmp, config.m_hosts); - BaseString & dir = host->m_base_dir; - - const int index = config.m_processes.size() + 1; - - atrt_process proc; - proc.m_index = index; - proc.m_host = host; - proc.m_proc.m_id = -1; - proc.m_proc.m_type = "temporary"; - proc.m_proc.m_owner = "atrt"; - proc.m_proc.m_group = "group"; - proc.m_proc.m_cwd.assign(dir).append("/run/"); - proc.m_proc.m_stdout = "log.out"; - proc.m_proc.m_stderr = "2>&1"; - proc.m_proc.m_runas = proc.m_host->m_user; - proc.m_proc.m_ulimit = "c:unlimited"; - proc.m_proc.m_env.assfmt("MYSQL_BASE_DIR=%s", dir.c_str()); - proc.m_proc.m_shutdown_options = ""; - proc.m_hostname = proc.m_host->m_hostname; - proc.m_ndb_mgm_port = g_default_base_port; - if(split1[0] == "mgm"){ - proc.m_type = atrt_process::NDB_MGM; - proc.m_proc.m_name.assfmt("%d-%s", index, "ndb_mgmd"); - proc.m_proc.m_path.assign(dir).append("/libexec/ndb_mgmd"); - proc.m_proc.m_args = "--nodaemon -f config.ini"; - proc.m_proc.m_cwd.appfmt("%d.ndb_mgmd", index); - connect_string.appfmt("host=%s:%d;", - proc.m_hostname.c_str(), proc.m_ndb_mgm_port); - } else if(split1[0] == "ndb"){ - proc.m_type = atrt_process::NDB_DB; - proc.m_proc.m_name.assfmt("%d-%s", index, "ndbd"); - proc.m_proc.m_path.assign(dir).append("/libexec/ndbd"); - proc.m_proc.m_args = "--initial --nodaemon -n"; - proc.m_proc.m_cwd.appfmt("%d.ndbd", index); - } else if(split1[0] == "mysqld"){ - proc.m_type = atrt_process::MYSQL_SERVER; - proc.m_proc.m_name.assfmt("%d-%s", index, "mysqld"); - proc.m_proc.m_path.assign(dir).append("/libexec/mysqld"); - proc.m_proc.m_args = "--core-file --ndbcluster"; - proc.m_proc.m_cwd.appfmt("%d.mysqld", index); - proc.m_proc.m_shutdown_options = "SIGKILL"; // not nice - } else if(split1[0] == "api"){ - proc.m_type = atrt_process::NDB_API; - proc.m_proc.m_name.assfmt("%d-%s", index, "ndb_api"); - proc.m_proc.m_path = ""; - proc.m_proc.m_args = ""; - proc.m_proc.m_cwd.appfmt("%d.ndb_api", index); - } else if(split1[0] == "mysql"){ - proc.m_type = atrt_process::MYSQL_CLIENT; - proc.m_proc.m_name.assfmt("%d-%s", index, "mysql"); - proc.m_proc.m_path = ""; - proc.m_proc.m_args = ""; - proc.m_proc.m_cwd.appfmt("%d.mysql", index); - } else { - g_logger.critical("%s:%d: Unhandled process type: %s", - g_process_config_filename, lineno, - split1[0].c_str()); - result = false; - goto end; - } - config.m_processes.push_back(proc); + g_my_cnf = strdup(tmp.c_str()); + g_logger.info("Using %s", tmp.c_str()); + } + else + { + g_my_cnf = strdup(mycnf.c_str()); + } + + g_logger.info("Using --prefix=\"%s\"", g_prefix); + + if(g_report_filename) + { + g_report_file = fopen(g_report_filename, "w"); + if(g_report_file == 0) + { + g_logger.critical("Unable to create report file: %s", g_report_filename); + return false; } } - - // Setup connect string - for(size_t i = 0; i<config.m_processes.size(); i++){ - config.m_processes[i].m_proc.m_env.appfmt(" NDB_CONNECTSTRING=%s", - connect_string.c_str()); + + if (g_clusters == 0) + { + 
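/*
 * The option letters handled a few hunks up pack several bootstrap actions
 * into one positional argument (e.g. "cdS" = configure + deploy + start all
 * servers).  A minimal standalone sketch of that idiom, assuming invented
 * flag names (DO_SETUP and friends are not the real atrt globals):
 *
 *   #include <cstdio>
 *   enum { DO_SETUP = 1, DO_DEPLOY = 2, DO_START = 4 };
 *   static int parse_mode_string(const char* arg, int* mode)
 *   {
 *     for (; *arg; arg++)
 *     {
 *       switch (*arg) {
 *       case 'c': *mode |= DO_SETUP;  break;
 *       case 'd': *mode |= DO_DEPLOY; break;
 *       case 'S': *mode |= DO_START;  break;
 *       default:
 *         fprintf(stderr, "Unknown switch '%c'\n", *arg);
 *         return -1;  // reject the whole string on an unknown letter
 *       }
 *     }
 *     return 0;
 *   }
 */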
g_logger.critical("No clusters specified"); + return false; } - end: - fclose(f); - return result; + g_user = strdup(getenv("LOGNAME")); + + return true; } bool connect_hosts(atrt_config& config){ for(size_t i = 0; i<config.m_hosts.size(); i++){ - if(config.m_hosts[i].m_cpcd->connect() != 0){ + if(config.m_hosts[i]->m_cpcd->connect() != 0){ g_logger.error("Unable to connect to cpc %s:%d", - config.m_hosts[i].m_cpcd->getHost(), - config.m_hosts[i].m_cpcd->getPort()); + config.m_hosts[i]->m_cpcd->getHost(), + config.m_hosts[i]->m_cpcd->getPort()); return false; } g_logger.debug("Connected to %s:%d", - config.m_hosts[i].m_cpcd->getHost(), - config.m_hosts[i].m_cpcd->getPort()); + config.m_hosts[i]->m_cpcd->getHost(), + config.m_hosts[i]->m_cpcd->getPort()); } return true; @@ -529,8 +623,10 @@ connect_ndb_mgm(atrt_process & proc){ g_logger.critical("Unable to create mgm handle"); return false; } - BaseString tmp = proc.m_hostname; - tmp.appfmt(":%d", proc.m_ndb_mgm_port); + BaseString tmp = proc.m_host->m_hostname; + const char * val; + proc.m_options.m_loaded.get("--PortNumber=", &val); + tmp.appfmt(":%s", val); if (ndb_mgm_set_connectstring(handle,tmp.c_str())) { @@ -551,8 +647,8 @@ connect_ndb_mgm(atrt_process & proc){ bool connect_ndb_mgm(atrt_config& config){ for(size_t i = 0; i<config.m_processes.size(); i++){ - atrt_process & proc = config.m_processes[i]; - if((proc.m_type & atrt_process::NDB_MGM) != 0){ + atrt_process & proc = *config.m_processes[i]; + if((proc.m_type & atrt_process::AP_NDB_MGMD) != 0){ if(!connect_ndb_mgm(proc)){ return false; } @@ -573,100 +669,110 @@ wait_ndb(atrt_config& config, int goal){ goal = remap(goal); - - /** - * Get mgm handle for cluster - */ - NdbMgmHandle handle = 0; - for(size_t i = 0; i<config.m_processes.size(); i++){ - atrt_process & proc = config.m_processes[i]; - if((proc.m_type & atrt_process::NDB_MGM) != 0){ - handle = proc.m_ndb_mgm_handle; - break; - } - } - if(handle == 0){ - g_logger.critical("Unable to find mgm handle"); - return false; - } - - if(goal == NDB_MGM_NODE_STATUS_STARTED){ + size_t cnt = 0; + for (size_t i = 0; i<config.m_clusters.size(); i++) + { + atrt_cluster* cluster = config.m_clusters[i]; /** - * 1) wait NOT_STARTED - * 2) send start - * 3) wait STARTED + * Get mgm handle for cluster */ - if(!wait_ndb(config, NDB_MGM_NODE_STATUS_NOT_STARTED)) + NdbMgmHandle handle = 0; + for(size_t j = 0; j<cluster->m_processes.size(); j++){ + atrt_process & proc = *cluster->m_processes[j]; + if((proc.m_type & atrt_process::AP_NDB_MGMD) != 0){ + handle = proc.m_ndb_mgm_handle; + break; + } + } + + if(handle == 0){ + g_logger.critical("Unable to find mgm handle"); return false; + } - ndb_mgm_start(handle, 0, 0); - } - - struct ndb_mgm_cluster_state * state; - - time_t now = time(0); - time_t end = now + 360; - int min = remap(NDB_MGM_NODE_STATUS_NO_CONTACT); - int min2 = goal; - - while(now < end){ - /** - * 1) retreive current state - */ - state = 0; - do { - state = ndb_mgm_get_status(handle); - if(state == 0){ - const int err = ndb_mgm_get_latest_error(handle); - g_logger.error("Unable to poll db state: %d %s %s", - ndb_mgm_get_latest_error(handle), - ndb_mgm_get_latest_error_msg(handle), - ndb_mgm_get_latest_error_desc(handle)); - if(err == NDB_MGM_SERVER_NOT_CONNECTED && connect_ndb_mgm(config)){ - g_logger.error("Reconnected..."); - continue; - } + if(goal == NDB_MGM_NODE_STATUS_STARTED){ + /** + * 1) wait NOT_STARTED + * 2) send start + * 3) wait STARTED + */ + if(!wait_ndb(config, NDB_MGM_NODE_STATUS_NOT_STARTED)) return false; - } 
- } while(state == 0); - NdbAutoPtr<void> tmp(state); + + ndb_mgm_start(handle, 0, 0); + } + + struct ndb_mgm_cluster_state * state; + + time_t now = time(0); + time_t end = now + 360; + int min = remap(NDB_MGM_NODE_STATUS_NO_CONTACT); + int min2 = goal; - min2 = goal; - for(int i = 0; i<state->no_of_nodes; i++){ - if(state->node_states[i].node_type == NDB_MGM_NODE_TYPE_NDB){ - const int s = remap(state->node_states[i].node_status); - min2 = (min2 < s ? min2 : s ); - - if(s < remap(NDB_MGM_NODE_STATUS_NO_CONTACT) || - s > NDB_MGM_NODE_STATUS_STARTED){ - g_logger.critical("Strange DB status during start: %d %d", i, min2); + while(now < end){ + /** + * 1) retreive current state + */ + state = 0; + do { + state = ndb_mgm_get_status(handle); + if(state == 0){ + const int err = ndb_mgm_get_latest_error(handle); + g_logger.error("Unable to poll db state: %d %s %s", + ndb_mgm_get_latest_error(handle), + ndb_mgm_get_latest_error_msg(handle), + ndb_mgm_get_latest_error_desc(handle)); + if(err == NDB_MGM_SERVER_NOT_CONNECTED && connect_ndb_mgm(config)){ + g_logger.error("Reconnected..."); + continue; + } return false; } - - if(min2 < min){ - g_logger.critical("wait ndb failed node: %d %d %d %d", - state->node_states[i].node_id, min, min2, goal); + } while(state == 0); + NdbAutoPtr<void> tmp(state); + + min2 = goal; + for(int j = 0; j<state->no_of_nodes; j++){ + if(state->node_states[j].node_type == NDB_MGM_NODE_TYPE_NDB){ + const int s = remap(state->node_states[j].node_status); + min2 = (min2 < s ? min2 : s ); + + if(s < remap(NDB_MGM_NODE_STATUS_NO_CONTACT) || + s > NDB_MGM_NODE_STATUS_STARTED){ + g_logger.critical("Strange DB status during start: %d %d", + j, min2); + return false; + } + + if(min2 < min){ + g_logger.critical("wait ndb failed node: %d %d %d %d", + state->node_states[j].node_id, min, min2, goal); + } } } + + if(min2 < min){ + g_logger.critical("wait ndb failed %d %d %d", min, min2, goal); + return false; + } + + if(min2 == goal){ + cnt++; + goto next; + } + + min = min2; + now = time(0); } - if(min2 < min){ - g_logger.critical("wait ndb failed %d %d %d", min, min2, goal); - return false; - } - - if(min2 == goal){ - return true; - break; - } - - min = min2; - now = time(0); + g_logger.critical("wait ndb timed out %d %d %d", min, min2, goal); + break; + +next: + ; } - - g_logger.critical("wait ndb timed out %d %d %d", min, min2, goal); - - return false; + + return cnt == config.m_clusters.size(); } bool @@ -676,21 +782,19 @@ start_process(atrt_process & proc){ return false; } - BaseString path = proc.m_proc.m_cwd.substr(proc.m_host->m_base_dir.length()+BaseString("/run").length()); - BaseString tmp = g_setup_progname; - tmp.appfmt(" %s %s/%s/ %s", + tmp.appfmt(" %s %s/ %s", proc.m_host->m_hostname.c_str(), - g_setup_path, - path.c_str(), + proc.m_proc.m_cwd.c_str(), proc.m_proc.m_cwd.c_str()); - + + g_logger.debug("system(%s)", tmp.c_str()); const int r1 = system(tmp.c_str()); if(r1 != 0){ g_logger.critical("Failed to setup process"); return false; } - + { Properties reply; if(proc.m_host->m_cpcd->define_process(proc.m_proc, reply) != 0){ @@ -715,7 +819,7 @@ start_process(atrt_process & proc){ bool start_processes(atrt_config& config, int types){ for(size_t i = 0; i<config.m_processes.size(); i++){ - atrt_process & proc = config.m_processes[i]; + atrt_process & proc = *config.m_processes[i]; if((types & proc.m_type) != 0 && proc.m_proc.m_path != ""){ if(!start_process(proc)){ return false; @@ -760,7 +864,7 @@ stop_process(atrt_process & proc){ bool stop_processes(atrt_config& config, 
int types){ for(size_t i = 0; i<config.m_processes.size(); i++){ - atrt_process & proc = config.m_processes[i]; + atrt_process & proc = *config.m_processes[i]; if((types & proc.m_type) != 0){ if(!stop_process(proc)){ return false; @@ -779,11 +883,11 @@ update_status(atrt_config& config, int){ m_procs.fill(config.m_hosts.size(), dummy); for(size_t i = 0; i<config.m_hosts.size(); i++){ Properties p; - config.m_hosts[i].m_cpcd->list_processes(m_procs[i], p); + config.m_hosts[i]->m_cpcd->list_processes(m_procs[i], p); } for(size_t i = 0; i<config.m_processes.size(); i++){ - atrt_process & proc = config.m_processes[i]; + atrt_process & proc = *config.m_processes[i]; if(proc.m_proc.m_id != -1){ Vector<SimpleCpcClient::Process> &h_procs= m_procs[proc.m_host->m_index]; bool found = false; @@ -798,7 +902,7 @@ update_status(atrt_config& config, int){ g_logger.error("update_status: not found"); g_logger.error("id: %d host: %s cmd: %s", proc.m_proc.m_id, - proc.m_hostname.c_str(), + proc.m_host->m_hostname.c_str(), proc.m_proc.m_path.c_str()); for(size_t j = 0; j<h_procs.size(); j++){ g_logger.error("found: %d %s", h_procs[j].m_id, @@ -815,7 +919,7 @@ int is_running(atrt_config& config, int types){ int found = 0, running = 0; for(size_t i = 0; i<config.m_processes.size(); i++){ - atrt_process & proc = config.m_processes[i]; + atrt_process & proc = *config.m_processes[i]; if((types & proc.m_type) != 0){ found++; if(proc.m_proc.m_status == "running") @@ -910,12 +1014,24 @@ read_test_case(FILE * file, atrt_testcase& tc, int& line){ tc.m_run_all= true; else tc.m_run_all= false; + + if (!p.get("name", &mt)) + { + tc.m_name.assfmt("%s %s", + tc.m_command.c_str(), + tc.m_args.c_str()); + } + else + { + tc.m_name.assign(mt); + } return true; } bool setup_test_case(atrt_config& config, const atrt_testcase& tc){ + g_logger.debug("system(%s)", g_clear_progname); const int r1 = system(g_clear_progname); if(r1 != 0){ g_logger.critical("Failed to clear result"); @@ -923,19 +1039,24 @@ setup_test_case(atrt_config& config, const atrt_testcase& tc){ } size_t i = 0; - for(; i<config.m_processes.size(); i++){ - atrt_process & proc = config.m_processes[i]; - if(proc.m_type == atrt_process::NDB_API || proc.m_type == atrt_process::MYSQL_CLIENT){ - proc.m_proc.m_path.assfmt("%s/bin/%s", proc.m_host->m_base_dir.c_str(), - tc.m_command.c_str()); + for(; i<config.m_processes.size(); i++) + { + atrt_process & proc = *config.m_processes[i]; + if(proc.m_type == atrt_process::AP_NDB_API || proc.m_type == atrt_process::AP_CLIENT){ + proc.m_proc.m_path = ""; + if (tc.m_command.c_str()[0] != '/') + { + proc.m_proc.m_path.appfmt("%s/bin/", g_prefix); + } + proc.m_proc.m_path.append(tc.m_command.c_str()); proc.m_proc.m_args.assign(tc.m_args); if(!tc.m_run_all) break; } } for(i++; i<config.m_processes.size(); i++){ - atrt_process & proc = config.m_processes[i]; - if(proc.m_type == atrt_process::NDB_API || proc.m_type == atrt_process::MYSQL_CLIENT){ + atrt_process & proc = *config.m_processes[i]; + if(proc.m_type == atrt_process::AP_NDB_API || proc.m_type == atrt_process::AP_CLIENT){ proc.m_proc.m_path.assign(""); proc.m_proc.m_args.assign(""); } @@ -946,24 +1067,27 @@ setup_test_case(atrt_config& config, const atrt_testcase& tc){ bool gather_result(atrt_config& config, int * result){ BaseString tmp = g_gather_progname; - for(size_t i = 0; i<config.m_processes.size(); i++){ - atrt_process & proc = config.m_processes[i]; - if(proc.m_proc.m_path != ""){ - tmp.appfmt(" %s:%s", - proc.m_hostname.c_str(), - proc.m_proc.m_cwd.c_str()); - } + + 
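/*
 * gather_result() collects one "host:path" spec per host into a single
 * shell command and runs it with system(), exactly as the loop that follows
 * does.  A sketch of the pattern with plain std::string (run_gather and the
 * script name are assumptions for illustration):
 *
 *   #include <cstdlib>
 *   #include <string>
 *   #include <sys/wait.h>
 *   static bool run_gather(const std::string& prog,
 *                          const char* const hosts[], size_t n)
 *   {
 *     std::string cmd = prog;            // e.g. "atrt-gather-result.sh"
 *     for (size_t i = 0; i < n; i++)
 *     {
 *       cmd += " ";
 *       cmd += hosts[i];                 // "host:/basedir/*" source spec
 *     }
 *     const int rc = system(cmd.c_str());
 *     // -1 means fork/exec failed; a status word of 127 << 8 means the
 *     // shell could not find the command (the same check appears below)
 *     return rc != -1 && rc != (127 << 8) && WEXITSTATUS(rc) == 0;
 *   }
 */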
for(size_t i = 0; i<config.m_hosts.size(); i++) + { + tmp.appfmt(" %s:%s/*", + config.m_hosts[i]->m_hostname.c_str(), + config.m_hosts[i]->m_basedir.c_str()); } - + + g_logger.debug("system(%s)", tmp.c_str()); const int r1 = system(tmp.c_str()); - if(r1 != 0){ - g_logger.critical("Failed to gather result"); + if(r1 != 0) + { + g_logger.critical("Failed to gather result!"); return false; } - + + g_logger.debug("system(%s)", g_analyze_progname); const int r2 = system(g_analyze_progname); - - if(r2 == -1 || r2 == (127 << 8)){ + + if(r2 == -1 || r2 == (127 << 8)) + { g_logger.critical("Failed to analyze results"); return false; } @@ -974,6 +1098,7 @@ gather_result(atrt_config& config, int * result){ bool setup_hosts(atrt_config& config){ + g_logger.debug("system(%s)", g_clear_progname); const int r1 = system(g_clear_progname); if(r1 != 0){ g_logger.critical("Failed to clear result"); @@ -982,21 +1107,143 @@ setup_hosts(atrt_config& config){ for(size_t i = 0; i<config.m_hosts.size(); i++){ BaseString tmp = g_setup_progname; - tmp.appfmt(" %s %s/ %s/run", - config.m_hosts[i].m_hostname.c_str(), - g_setup_path, - config.m_hosts[i].m_base_dir.c_str()); + tmp.appfmt(" %s %s/ %s/", + config.m_hosts[i]->m_hostname.c_str(), + g_basedir, + config.m_hosts[i]->m_basedir.c_str()); + g_logger.debug("system(%s)", tmp.c_str()); const int r1 = system(tmp.c_str()); if(r1 != 0){ g_logger.critical("Failed to setup %s", - config.m_hosts[i].m_hostname.c_str()); + config.m_hosts[i]->m_hostname.c_str()); + return false; + } + } + return true; +} + +bool +deploy(atrt_config & config) +{ + for (size_t i = 0; i<config.m_hosts.size(); i++) + { + BaseString tmp = g_setup_progname; + tmp.appfmt(" %s %s/ %s", + config.m_hosts[i]->m_hostname.c_str(), + g_prefix, + g_prefix); + + g_logger.info("rsyncing %s to %s", g_prefix, + config.m_hosts[i]->m_hostname.c_str()); + g_logger.debug("system(%s)", tmp.c_str()); + const int r1 = system(tmp.c_str()); + if(r1 != 0) + { + g_logger.critical("Failed to rsync %s to %s", + g_prefix, + config.m_hosts[i]->m_hostname.c_str()); return false; } } + return true; } +bool +sshx(atrt_config & config, unsigned mask) +{ + for (size_t i = 0; i<config.m_processes.size(); i++) + { + atrt_process & proc = *config.m_processes[i]; + + BaseString tmp; + const char * type = 0; + switch(proc.m_type){ + case atrt_process::AP_NDB_MGMD: + type = (mask & proc.m_type) ? "ndb_mgmd" : 0; + break; + case atrt_process::AP_NDBD: + type = (mask & proc.m_type) ? "ndbd" : 0; + break; + case atrt_process::AP_MYSQLD: + type = (mask & proc.m_type) ? "mysqld" : 0; + break; + case atrt_process::AP_NDB_API: + type = (mask & proc.m_type) ? "ndbapi" : 0; + break; + case atrt_process::AP_CLIENT: + type = (mask & proc.m_type) ? 
"client" : 0; + break; + default: + type = "<unknown>"; + } + + if (type == 0) + continue; + + tmp.appfmt("xterm -fg black -title \"%s(%s) on %s\"" + " -e 'ssh -t -X %s sh %s/ssh-login.sh' &", + type, + proc.m_cluster->m_name.c_str(), + proc.m_host->m_hostname.c_str(), + proc.m_host->m_hostname.c_str(), + proc.m_proc.m_cwd.c_str()); + + g_logger.debug("system(%s)", tmp.c_str()); + const int r1 = system(tmp.c_str()); + if(r1 != 0) + { + g_logger.critical("Failed sshx (%s)", + tmp.c_str()); + return false; + } + NdbSleep_MilliSleep(300); // To prevent xlock problem + } + + return true; +} + +bool +start(atrt_config & config, unsigned proc_mask) +{ + if (proc_mask & atrt_process::AP_NDB_MGMD) + if(!start_processes(g_config, atrt_process::AP_NDB_MGMD)) + return false; + + if (proc_mask & atrt_process::AP_NDBD) + { + if(!connect_ndb_mgm(g_config)){ + return false; + } + + if(!start_processes(g_config, atrt_process::AP_NDBD)) + return false; + + if(!wait_ndb(g_config, NDB_MGM_NODE_STATUS_NOT_STARTED)) + return false; + + for(Uint32 i = 0; i<3; i++) + if(wait_ndb(g_config, NDB_MGM_NODE_STATUS_STARTED)) + goto started; + return false; + } + +started: + if(!start_processes(g_config, p_servers & proc_mask)) + return false; + + return true; +} + +void +require(bool x) +{ + if (!x) + abort(); +} + template class Vector<Vector<SimpleCpcClient::Process> >; -template class Vector<atrt_host>; -template class Vector<atrt_process>; +template class Vector<atrt_host*>; +template class Vector<atrt_cluster*>; +template class Vector<atrt_process*>; diff --git a/storage/ndb/test/run-test/run-test.hpp b/storage/ndb/test/run-test/run-test.hpp deleted file mode 100644 index 2b259e83a60..00000000000 --- a/storage/ndb/test/run-test/run-test.hpp +++ /dev/null @@ -1,95 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -#ifndef atrt_config_hpp -#define atrt_config_hpp - -#include <getarg.h> -#include <Vector.hpp> -#include <BaseString.hpp> -#include <Logger.hpp> -#include <mgmapi.h> -#include <CpcClient.hpp> - -#undef MYSQL_CLIENT - -enum ErrorCodes { - ERR_OK = 0, - ERR_NDB_FAILED = 101, - ERR_SERVERS_FAILED = 102, - ERR_MAX_TIME_ELAPSED = 103 -}; - -struct atrt_host { - size_t m_index; - BaseString m_user; - BaseString m_base_dir; - BaseString m_hostname; - SimpleCpcClient * m_cpcd; -}; - -struct atrt_process { - size_t m_index; - BaseString m_hostname; - struct atrt_host * m_host; - - enum Type { - ALL = 255, - NDB_DB = 1, - NDB_API = 2, - NDB_MGM = 4, - NDB_REP = 8, - MYSQL_SERVER = 16, - MYSQL_CLIENT = 32 - } m_type; - - SimpleCpcClient::Process m_proc; - short m_ndb_mgm_port; - NdbMgmHandle m_ndb_mgm_handle; // if type == ndb_mgm -}; - -struct atrt_config { - BaseString m_key; - Vector<atrt_host> m_hosts; - Vector<atrt_process> m_processes; -}; - -struct atrt_testcase { - bool m_report; - bool m_run_all; - time_t m_max_time; - BaseString m_command; - BaseString m_args; -}; - -extern Logger g_logger; - -bool parse_args(int argc, const char** argv); -bool setup_config(atrt_config&); -bool connect_hosts(atrt_config&); -bool connect_ndb_mgm(atrt_config&); -bool wait_ndb(atrt_config&, int ndb_mgm_node_status); -bool start_processes(atrt_config&, int); -bool stop_processes(atrt_config&, int); -bool update_status(atrt_config&, int); -int is_running(atrt_config&, int); -bool gather_result(atrt_config&, int * result); - -bool read_test_case(FILE *, atrt_testcase&, int& line); -bool setup_test_case(atrt_config&, const atrt_testcase&); - -bool setup_hosts(atrt_config&); - -#endif diff --git a/storage/ndb/test/run-test/setup.cpp b/storage/ndb/test/run-test/setup.cpp new file mode 100644 index 00000000000..cbb7a34f171 --- /dev/null +++ b/storage/ndb/test/run-test/setup.cpp @@ -0,0 +1,965 @@ +#include "atrt.hpp" +#include <ndb_global.h> +#include <my_sys.h> +#include <my_getopt.h> +#include <NdbOut.hpp> + +static NdbOut& operator<<(NdbOut& out, const atrt_process& proc); +static atrt_host * find(const char * hostname, Vector<atrt_host*>&); +static bool load_process(atrt_config&, atrt_cluster&, atrt_process::Type, + size_t idx, const char * hostname); +static bool load_options(int argc, char** argv, int type, atrt_options&); + +enum { + PO_NDB = atrt_options::AO_NDBCLUSTER + + ,PO_REP_SLAVE = 256 + ,PO_REP_MASTER = 512 + ,PO_REP = (atrt_options::AO_REPLICATION | PO_REP_SLAVE | PO_REP_MASTER) +}; + +struct proc_option +{ + const char * name; + int type; + int options; +}; + +static +struct proc_option f_options[] = { + { "--FileSystemPath=", atrt_process::AP_NDBD, 0 } + ,{ "--PortNumber=", atrt_process::AP_NDB_MGMD, 0 } + ,{ "--datadir=", atrt_process::AP_MYSQLD, 0 } + ,{ "--socket=", atrt_process::AP_MYSQLD | atrt_process::AP_CLIENT, 0 } + ,{ "--port=", atrt_process::AP_MYSQLD | atrt_process::AP_CLIENT, 0 } + ,{ "--server-id=", atrt_process::AP_MYSQLD, PO_REP } + ,{ "--log-bin", atrt_process::AP_MYSQLD, PO_REP_MASTER } + ,{ "--master-host=", atrt_process::AP_MYSQLD, PO_REP_SLAVE } + ,{ "--master-port=", atrt_process::AP_MYSQLD, PO_REP_SLAVE } + ,{ "--master-user=", atrt_process::AP_MYSQLD, PO_REP_SLAVE } + ,{ "--master-password=", atrt_process::AP_MYSQLD, PO_REP_SLAVE } + ,{ "--ndb-connectstring=", 
atrt_process::AP_MYSQLD | atrt_process::AP_CLUSTER + ,PO_NDB } + ,{ "--ndbcluster", atrt_process::AP_MYSQLD, PO_NDB } + ,{ 0, 0, 0 } +}; +const char * ndbcs = "--ndb-connectstring="; + +bool +setup_config(atrt_config& config) +{ + BaseString tmp(g_clusters); + Vector<BaseString> clusters; + tmp.split(clusters, ","); + + bool fqpn = clusters.size() > 1 || g_fqpn; + + size_t j,k; + for (size_t i = 0; i<clusters.size(); i++) + { + struct atrt_cluster *cluster = new atrt_cluster; + config.m_clusters.push_back(cluster); + + cluster->m_name = clusters[i]; + if (fqpn) + { + cluster->m_dir.assfmt("cluster%s/", cluster->m_name.c_str()); + } + else + { + cluster->m_dir = ""; + } + + int argc = 1; + const char * argv[] = { "atrt", 0, 0 }; + + BaseString buf; + buf.assfmt("--defaults-group-suffix=%s", clusters[i].c_str()); + argv[argc++] = buf.c_str(); + char ** tmp = (char**)argv; + const char *groups[] = { "cluster_config", 0 }; + int ret = load_defaults(g_my_cnf, groups, &argc, &tmp); + if (ret) + { + g_logger.error("Unable to load defaults for cluster: %s", + clusters[i].c_str()); + return false; + } + + struct + { + atrt_process::Type type; + const char * name; + const char * value; + } proc_args[] = { + { atrt_process::AP_NDB_MGMD, "--ndb_mgmd=", 0 }, + { atrt_process::AP_NDBD, "--ndbd=", 0 }, + { atrt_process::AP_NDB_API, "--ndbapi=", 0 }, + { atrt_process::AP_NDB_API, "--api=", 0 }, + { atrt_process::AP_MYSQLD, "--mysqld=", 0 }, + { atrt_process::AP_ALL, 0, 0} + }; + + /** + * Find all processes... + */ + for (j = 0; j<(size_t)argc; j++) + { + for (k = 0; proc_args[k].name; k++) + { + if (!strncmp(tmp[j], proc_args[k].name, strlen(proc_args[k].name))) + { + proc_args[k].value = tmp[j] + strlen(proc_args[k].name); + break; + } + } + } + + /** + * Load each process + */ + for (j = 0; proc_args[j].name; j++) + { + if (proc_args[j].value) + { + BaseString tmp(proc_args[j].value); + Vector<BaseString> list; + tmp.split(list, ","); + for (k = 0; k<list.size(); k++) + if (!load_process(config, *cluster, proc_args[j].type, + k + 1, list[k].c_str())) + return false; + } + } + + { + /** + * Load cluster options + */ + + argc = 1; + argv[argc++] = buf.c_str(); + const char *groups[] = { "mysql_cluster", 0 }; + ret = load_defaults(g_my_cnf, groups, &argc, &tmp); + + if (ret) + { + g_logger.error("Unable to load defaults for cluster: %s", + clusters[i].c_str()); + return false; + } + + load_options(argc, tmp, atrt_process::AP_CLUSTER, cluster->m_options); + } + } + return true; +} + +static +atrt_host * +find(const char * hostname, Vector<atrt_host*> & hosts){ + for (size_t i = 0; i<hosts.size(); i++){ + if (hosts[i]->m_hostname == hostname){ + return hosts[i]; + } + } + + atrt_host* host = new atrt_host; + host->m_index = hosts.size(); + host->m_cpcd = new SimpleCpcClient(hostname, 1234); + host->m_basedir = g_basedir; + host->m_user = g_user; + host->m_hostname = hostname; + hosts.push_back(host); + return host; +} + +static +bool +load_process(atrt_config& config, atrt_cluster& cluster, + atrt_process::Type type, + size_t idx, + const char * hostname) +{ + atrt_host * host_ptr = find(hostname, config.m_hosts); + atrt_process *proc_ptr = new atrt_process; + + config.m_processes.push_back(proc_ptr); + host_ptr->m_processes.push_back(proc_ptr); + cluster.m_processes.push_back(proc_ptr); + + atrt_process& proc = *proc_ptr; + + const size_t proc_no = config.m_processes.size(); + proc.m_index = idx; + proc.m_type = type; + proc.m_host = host_ptr; + proc.m_cluster = &cluster; + proc.m_options.m_features = 0; + 
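/*
 * Note the three push_back calls just above: one atrt_process object is now
 * indexed by the global config, by its host and by its cluster, which is why
 * this patch switches from Vector<atrt_process> to Vector<atrt_process*>
 * throughout.  A minimal sketch of that ownership layout (names invented):
 *
 *   #include <vector>
 *   struct Proc { int id; };
 *   struct Registry {
 *     std::vector<Proc*> m_all;         // owning list
 *     std::vector<Proc*> m_by_host;     // non-owning views of the same objects
 *     std::vector<Proc*> m_by_cluster;
 *     Proc* add(int id) {
 *       Proc* p = new Proc;
 *       p->id = id;
 *       m_all.push_back(p);             // one allocation, three indexes;
 *       m_by_host.push_back(p);         // growing any vector never moves
 *       m_by_cluster.push_back(p);      // the object the others point to
 *       return p;
 *     }
 *     ~Registry() {
 *       for (size_t i = 0; i < m_all.size(); i++) delete m_all[i];
 *     }
 *   };
 */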
proc.m_rep_src = 0; + proc.m_proc.m_id = -1; + proc.m_proc.m_type = "temporary"; + proc.m_proc.m_owner = "atrt"; + proc.m_proc.m_group = cluster.m_name.c_str(); + proc.m_proc.m_stdout = "log.out"; + proc.m_proc.m_stderr = "2>&1"; + proc.m_proc.m_runas = proc.m_host->m_user; + proc.m_proc.m_ulimit = "c:unlimited"; + proc.m_proc.m_env.assfmt("MYSQL_BASE_DIR=%s", g_prefix); + proc.m_proc.m_env.appfmt(" MYSQL_HOME=%s", g_basedir); + proc.m_proc.m_shutdown_options = ""; + + int argc = 1; + const char * argv[] = { "atrt", 0, 0 }; + + BaseString buf[10]; + char ** tmp = (char**)argv; + const char *groups[] = { 0, 0, 0, 0 }; + switch(type){ + case atrt_process::AP_NDB_MGMD: + groups[0] = "cluster_config"; + buf[1].assfmt("cluster_config.ndb_mgmd.%d", idx); + groups[1] = buf[1].c_str(); + buf[0].assfmt("--defaults-group-suffix=%s", cluster.m_name.c_str()); + argv[argc++] = buf[0].c_str(); + break; + case atrt_process::AP_NDBD: + groups[0] = "cluster_config"; + buf[1].assfmt("cluster_config.ndbd.%d", idx); + groups[1] = buf[1].c_str(); + buf[0].assfmt("--defaults-group-suffix=%s", cluster.m_name.c_str()); + argv[argc++] = buf[0].c_str(); + break; + case atrt_process::AP_MYSQLD: + groups[0] = "mysqld"; + groups[1] = "mysql_cluster"; + buf[0].assfmt("--defaults-group-suffix=.%d%s",idx,cluster.m_name.c_str()); + argv[argc++] = buf[0].c_str(); + break; + case atrt_process::AP_CLIENT: + buf[0].assfmt("client.%d%s", idx, cluster.m_name.c_str()); + groups[0] = buf[0].c_str(); + break; + case atrt_process::AP_NDB_API: + break; + default: + g_logger.critical("Unhandled process type: %d", type); + return false; + } + + int ret = load_defaults(g_my_cnf, groups, &argc, &tmp); + if (ret) + { + g_logger.error("Unable to load defaults for cluster: %s", + cluster.m_name.c_str()); + return false; + } + + load_options(argc, tmp, type, proc.m_options); + + BaseString dir; + dir.assfmt("%s/%s", + proc.m_host->m_basedir.c_str(), + cluster.m_dir.c_str()); + + switch(type){ + case atrt_process::AP_NDB_MGMD: + { + proc.m_proc.m_name.assfmt("%d-%s", proc_no, "ndb_mgmd"); + proc.m_proc.m_path.assign(g_prefix).append("/libexec/ndb_mgmd"); + proc.m_proc.m_args.assfmt("--defaults-file=%s/my.cnf", + proc.m_host->m_basedir.c_str()); + proc.m_proc.m_args.appfmt(" --defaults-group-suffix=%s", + cluster.m_name.c_str()); + proc.m_proc.m_args.append(" --nodaemon --mycnf"); + proc.m_proc.m_cwd.assfmt("%sndb_mgmd.%d", dir.c_str(), proc.m_index); + proc.m_proc.m_env.appfmt(" MYSQL_GROUP_SUFFIX=%s", + cluster.m_name.c_str()); + break; + } + case atrt_process::AP_NDBD: + { + proc.m_proc.m_name.assfmt("%d-%s", proc_no, "ndbd"); + proc.m_proc.m_path.assign(g_prefix).append("/libexec/ndbd"); + proc.m_proc.m_args.assfmt("--defaults-file=%s/my.cnf", + proc.m_host->m_basedir.c_str()); + proc.m_proc.m_args.appfmt(" --defaults-group-suffix=%s", + cluster.m_name.c_str()); + proc.m_proc.m_args.append(" --nodaemon -n"); + proc.m_proc.m_cwd.assfmt("%sndbd.%d", dir.c_str(), proc.m_index); + proc.m_proc.m_env.appfmt(" MYSQL_GROUP_SUFFIX=%s", + cluster.m_name.c_str()); + break; + } + case atrt_process::AP_MYSQLD: + { + proc.m_proc.m_name.assfmt("%d-%s", proc_no, "mysqld"); + proc.m_proc.m_path.assign(g_prefix).append("/libexec/mysqld"); + proc.m_proc.m_args.assfmt("--defaults-file=%s/my.cnf", + proc.m_host->m_basedir.c_str()); + proc.m_proc.m_args.appfmt(" --defaults-group-suffix=.%d%s", + proc.m_index, + cluster.m_name.c_str()); + proc.m_proc.m_args.append(" --core-file"); + proc.m_proc.m_cwd.appfmt("%smysqld.%d", dir.c_str(), proc.m_index); + 
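/*
 * Every process set up in this switch is launched with --defaults-file plus
 * a --defaults-group-suffix, so the one generated my.cnf can carry a private
 * section per process (e.g. [mysqld.2.mycluster]).  A sketch of composing
 * such an argument string (mysqld_args is an invented helper, not atrt code):
 *
 *   #include <cstdio>
 *   #include <string>
 *   static std::string mysqld_args(const char* basedir, int idx,
 *                                  const char* cluster)
 *   {
 *     char buf[512];
 *     snprintf(buf, sizeof(buf),
 *              "--defaults-file=%s/my.cnf"
 *              " --defaults-group-suffix=.%d%s --core-file",
 *              basedir, idx, cluster);  // selects the [mysqld.<idx><cluster>] group
 *     return std::string(buf);
 *   }
 */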
proc.m_proc.m_shutdown_options = "SIGKILL"; // not nice + proc.m_proc.m_env.appfmt(" MYSQL_GROUP_SUFFIX=.%d%s", + proc.m_index, + cluster.m_name.c_str()); + break; + } + case atrt_process::AP_NDB_API: + { + proc.m_proc.m_name.assfmt("%d-%s", proc_no, "ndb_api"); + proc.m_proc.m_path = ""; + proc.m_proc.m_args = ""; + proc.m_proc.m_cwd.appfmt("%sndb_api.%d", dir.c_str(), proc.m_index); + proc.m_proc.m_env.appfmt(" MYSQL_GROUP_SUFFIX=%s", + cluster.m_name.c_str()); + break; + } + case atrt_process::AP_CLIENT: + { + proc.m_proc.m_name.assfmt("%d-%s", proc_no, "mysql"); + proc.m_proc.m_path = ""; + proc.m_proc.m_args = ""; + proc.m_proc.m_cwd.appfmt("%s/client.%d", dir.c_str(), proc.m_index); + proc.m_proc.m_env.appfmt(" MYSQL_GROUP_SUFFIX=.%d%s", + proc.m_index, + cluster.m_name.c_str()); + break; + } + case atrt_process::AP_ALL: + case atrt_process::AP_CLUSTER: + g_logger.critical("Unhandled process type: %d", proc.m_type); + return false; + } + + if (proc.m_proc.m_path.length()) + { + proc.m_proc.m_env.appfmt(" CMD=\"%s", proc.m_proc.m_path.c_str()); + if (proc.m_proc.m_args.length()) + proc.m_proc.m_env.append(" "); + proc.m_proc.m_env.append(proc.m_proc.m_args); + proc.m_proc.m_env.append("\" "); + } + + if (type == atrt_process::AP_MYSQLD) + { + /** + * Add a client for each mysqld + */ + if (!load_process(config, cluster, atrt_process::AP_CLIENT, idx, hostname)) + { + return false; + } + } + + if (type == atrt_process::AP_CLIENT) + { + proc.m_mysqld = cluster.m_processes[cluster.m_processes.size()-2]; + } + + return true; +} + +static +bool +load_options(int argc, char** argv, int type, atrt_options& opts) +{ + for (size_t i = 0; i<(size_t)argc; i++) + { + for (size_t j = 0; f_options[j].name; j++) + { + const char * name = f_options[j].name; + const size_t len = strlen(name); + + if ((f_options[j].type & type) && strncmp(argv[i], name, len) == 0) + { + opts.m_loaded.put(name, argv[i]+len, true); + break; + } + } + } + return true; +} + +struct proc_rule_ctx +{ + int m_setup; + atrt_config* m_config; + atrt_host * m_host; + atrt_cluster* m_cluster; + atrt_process* m_process; +}; + +struct proc_rule +{ + int type; + bool (* func)(Properties& prop, proc_rule_ctx&, int extra); + int extra; +}; + +static bool pr_check_replication(Properties&, proc_rule_ctx&, int); +static bool pr_check_features(Properties&, proc_rule_ctx&, int); +static bool pr_fix_client(Properties&, proc_rule_ctx&, int); +static bool pr_proc_options(Properties&, proc_rule_ctx&, int); +static bool pr_fix_ndb_connectstring(Properties&, proc_rule_ctx&, int); +static bool pr_set_ndb_connectstring(Properties&, proc_rule_ctx&, int); +static bool pr_check_proc(Properties&, proc_rule_ctx&, int); + +static +proc_rule f_rules[] = +{ + { atrt_process::AP_CLUSTER, pr_check_features, 0 } + ,{ atrt_process::AP_MYSQLD, pr_check_replication, 0 } + ,{ (atrt_process::AP_ALL & ~atrt_process::AP_CLIENT), pr_proc_options, + ~(PO_REP | PO_NDB) } + ,{ (atrt_process::AP_ALL & ~atrt_process::AP_CLIENT), pr_proc_options, PO_REP } + ,{ atrt_process::AP_CLIENT, pr_fix_client, 0 } + ,{ atrt_process::AP_CLUSTER, pr_fix_ndb_connectstring, 0 } + ,{ atrt_process::AP_MYSQLD, pr_set_ndb_connectstring, 0 } + ,{ atrt_process::AP_ALL, pr_check_proc, 0 } + ,{ 0, 0, 0 } +}; + +bool +configure(atrt_config& config, int setup) +{ + Properties props; + + for (size_t i = 0; f_rules[i].func; i++) + { + bool ok = true; + proc_rule_ctx ctx; + bzero(&ctx, sizeof(ctx)); + ctx.m_setup = setup; + ctx.m_config = &config; + + for (size_t j = 0; j < config.m_clusters.size(); 
j++) + { + ctx.m_cluster = config.m_clusters[j]; + + if (f_rules[i].type & atrt_process::AP_CLUSTER) + { + g_logger.debug("applying rule %d to cluster %s", i, + ctx.m_cluster->m_name.c_str()); + if (! (* f_rules[i].func)(props, ctx, f_rules[i].extra)) + ok = false; + } + else + { + atrt_cluster& cluster = *config.m_clusters[j]; + for (size_t k = 0; k<cluster.m_processes.size(); k++) + { + atrt_process& proc = *cluster.m_processes[k]; + ctx.m_process = cluster.m_processes[k]; + if (proc.m_type & f_rules[i].type) + { + g_logger.debug("applying rule %d to %s", i, + proc.m_proc.m_cwd.c_str()); + if (! (* f_rules[i].func)(props, ctx, f_rules[i].extra)) + ok = false; + } + } + } + } + + if (!ok) + { + return false; + } + } + + return true; +} + +static +atrt_process* +find(atrt_config& config, int type, const char * name) +{ + BaseString tmp(name); + Vector<BaseString> src; + Vector<BaseString> dst; + tmp.split(src, "."); + + if (src.size() != 2) + { + return 0; + } + atrt_cluster* cluster = 0; + BaseString cl; + cl.appfmt(".%s", src[1].c_str()); + for (size_t i = 0; i<config.m_clusters.size(); i++) + { + if (config.m_clusters[i]->m_name == cl) + { + cluster = config.m_clusters[i]; + break; + } + } + + if (cluster == 0) + { + return 0; + } + + int idx = atoi(src[0].c_str()) - 1; + for (size_t i = 0; i<cluster->m_processes.size(); i++) + { + if (cluster->m_processes[i]->m_type & type) + { + if (idx == 0) + return cluster->m_processes[i]; + else + idx --; + } + } + + return 0; +} + +static +bool +pr_check_replication(Properties& props, proc_rule_ctx& ctx, int) +{ + if (! (ctx.m_config->m_replication == "")) + { + Vector<BaseString> list; + ctx.m_config->m_replication.split(list, ";"); + atrt_config& config = *ctx.m_config; + + ctx.m_config->m_replication = ""; + + const char * msg = "Invalid replication specification"; + for (size_t i = 0; i<list.size(); i++) + { + Vector<BaseString> rep; + list[i].split(rep, ":"); + if (rep.size() != 2) + { + g_logger.error("%s: %s (split: %d)", msg, list[i].c_str(), rep.size()); + return false; + } + + atrt_process* src = find(config, atrt_process::AP_MYSQLD,rep[0].c_str()); + atrt_process* dst = find(config, atrt_process::AP_MYSQLD,rep[1].c_str()); + + if (src == 0 || dst == 0) + { + g_logger.error("%s: %s (%d %d)", + msg, list[i].c_str(), src != 0, dst != 0); + return false; + } + + + if (dst->m_rep_src != 0) + { + g_logger.error("%s: %s : %s already has replication src (%s)", + msg, + list[i].c_str(), + dst->m_proc.m_cwd.c_str(), + dst->m_rep_src->m_proc.m_cwd.c_str()); + return false; + } + + dst->m_rep_src = src; + src->m_rep_dst.push_back(dst); + + src->m_options.m_features |= PO_REP_MASTER; + dst->m_options.m_features |= PO_REP_SLAVE; + } + } + return true; +} + +static +bool +pr_check_features(Properties& props, proc_rule_ctx& ctx, int) +{ + int features = 0; + atrt_cluster& cluster = *ctx.m_cluster; + for (size_t i = 0; i<cluster.m_processes.size(); i++) + { + if (cluster.m_processes[i]->m_type == atrt_process::AP_NDB_MGMD || + cluster.m_processes[i]->m_type == atrt_process::AP_NDB_API || + cluster.m_processes[i]->m_type == atrt_process::AP_NDBD) + { + features |= atrt_options::AO_NDBCLUSTER; + break; + } + } + + if (features) + { + cluster.m_options.m_features |= features; + for (size_t i = 0; i<cluster.m_processes.size(); i++) + { + cluster.m_processes[i]->m_options.m_features |= features; + } + } + return true; +} + +static +bool +pr_fix_client(Properties& props, proc_rule_ctx& ctx, int) +{ + for (size_t i = 0; f_options[i].name; i++) + { + 
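/*
 * pr_fix_client and its siblings are dispatched from the 0-terminated
 * f_rules table that configure() walks above: each rule pairs a process-type
 * mask with a function pointer.  A condensed sketch of that table-driven
 * pass (Ctx and Rule are stand-ins for proc_rule_ctx and proc_rule):
 *
 *   #include <cstddef>
 *   struct Ctx { int type; };
 *   typedef bool (*rule_fn)(Ctx&);
 *   struct Rule { int type_mask; rule_fn fn; };
 *   static bool apply_rules(const Rule* rules, Ctx* procs, size_t n)
 *   {
 *     bool ok = true;
 *     for (size_t i = 0; rules[i].fn; i++)       // table ends at a null fn
 *       for (size_t j = 0; j < n; j++)
 *         if (procs[j].type & rules[i].type_mask)
 *           if (!rules[i].fn(procs[j]))
 *             ok = false;                        // keep going, report at end
 *     return ok;
 *   }
 */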
proc_option& opt = f_options[i]; + const char * name = opt.name; + if (opt.type & atrt_process::AP_CLIENT) + { + const char * val; + atrt_process& proc = *ctx.m_process; + if (!proc.m_options.m_loaded.get(name, &val)) + { + require(proc.m_mysqld->m_options.m_loaded.get(name, &val)); + proc.m_options.m_loaded.put(name, val); + proc.m_options.m_generated.put(name, val); + } + } + } + + return true; +} + +static +Uint32 +try_default_port(atrt_process& proc, const char * name) +{ + Uint32 port = + strcmp(name, "--port=") == 0 ? 3306 : + strcmp(name, "--PortNumber=") == 0 ? 1186 : + 0; + + atrt_host * host = proc.m_host; + for (size_t i = 0; i<host->m_processes.size(); i++) + { + const char * val; + if (host->m_processes[i]->m_options.m_loaded.get(name, &val)) + { + if ((Uint32)atoi(val) == port) + return 0; + } + } + return port; +} + +static +bool +generate(atrt_process& proc, const char * name, Properties& props) +{ + atrt_options& opts = proc.m_options; + if (strcmp(name, "--port=") == 0 || + strcmp(name, "--PortNumber=") == 0) + { + Uint32 val; + if (g_default_ports == 0 || (val = try_default_port(proc, name)) == 0) + { + val = g_baseport; + props.get("--PortNumber=", &val); + props.put("--PortNumber=", (val + 1), true); + } + + char buf[255]; + snprintf(buf, sizeof(buf), "%u", val); + opts.m_loaded.put(name, buf); + opts.m_generated.put(name, buf); + return true; + } + else if (strcmp(name, "--datadir=") == 0) + { + opts.m_loaded.put(name, proc.m_proc.m_cwd.c_str()); + opts.m_generated.put(name, proc.m_proc.m_cwd.c_str()); + return true; + } + else if (strcmp(name, "--FileSystemPath=") == 0) + { + BaseString dir; + dir.append(proc.m_host->m_basedir); + dir.append("/"); + dir.append(proc.m_cluster->m_dir); + opts.m_loaded.put(name, dir.c_str()); + opts.m_generated.put(name, dir.c_str()); + return true; + } + else if (strcmp(name, "--socket=") == 0) + { + const char * sock = 0; + if (g_default_ports) + { + sock = "/tmp/mysql.sock"; + atrt_host * host = proc.m_host; + for (size_t i = 0; i<host->m_processes.size(); i++) + { + const char * val; + if (host->m_processes[i]->m_options.m_loaded.get(name, &val)) + { + if (strcmp(sock, val) == 0) + { + sock = 0; + break; + } + } + } + } + + BaseString tmp; + if (sock == 0) + { + tmp.assfmt("%s/mysql.sock", proc.m_proc.m_cwd.c_str()); + sock = tmp.c_str(); + } + + opts.m_loaded.put(name, sock); + opts.m_generated.put(name, sock); + return true; + } + else if (strcmp(name, "--server-id=") == 0) + { + Uint32 val = 1; + props.get(name, &val); + char buf[255]; + snprintf(buf, sizeof(buf), "%u", val); + opts.m_loaded.put(name, buf); + opts.m_generated.put(name, buf); + props.put(name, (val + 1), true); + return true; + } + else if (strcmp(name, "--log-bin") == 0) + { + opts.m_loaded.put(name, ""); + opts.m_generated.put(name, ""); + return true; + } + else if (strcmp(name, "--master-host=") == 0) + { + require(proc.m_rep_src != 0); + opts.m_loaded.put(name, proc.m_rep_src->m_host->m_hostname.c_str()); + opts.m_generated.put(name, proc.m_rep_src->m_host->m_hostname.c_str()); + return true; + } + else if (strcmp(name, "--master-port=") == 0) + { + const char* val; + require(proc.m_rep_src->m_options.m_loaded.get("--port=", &val)); + opts.m_loaded.put(name, val); + opts.m_generated.put(name, val); + return true; + } + else if (strcmp(name, "--master-user=") == 0) + { + opts.m_loaded.put(name, "root"); + opts.m_generated.put(name, "root"); + return true; + } + else if (strcmp(name, "--master-password=") == 0) + { + opts.m_loaded.put(name, "\"\""); + 
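/*
 * generate() hands out TCP ports from a counter kept in the shared props
 * object: read the current --PortNumber= value, use it, and store value+1
 * back.  The same idiom with a plain std::map (alloc_port is an invented
 * name, not part of atrt):
 *
 *   #include <map>
 *   #include <string>
 *   static unsigned alloc_port(std::map<std::string, unsigned>& counters,
 *                              unsigned baseport)
 *   {
 *     unsigned& next = counters["--PortNumber="];  // value-initialized to 0
 *     if (next == 0)
 *       next = baseport;   // first allocation starts at the base port
 *     return next++;       // consume this port and advance the counter
 *   }
 */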
opts.m_generated.put(name, "\"\""); + return true; + } + + g_logger.warning("Unknown parameter: %s", name); + return true; +} + +static +bool +pr_proc_options(Properties& props, proc_rule_ctx& ctx, int extra) +{ + for (size_t i = 0; f_options[i].name; i++) + { + proc_option& opt = f_options[i]; + atrt_process& proc = *ctx.m_process; + const char * name = opt.name; + if (opt.type & proc.m_type) + { + if (opt.options == 0 || + (opt.options & extra & proc.m_options.m_features)) + { + const char * val; + if (!proc.m_options.m_loaded.get(name, &val)) + { + generate(proc, name, props); + } + } + } + } + return true; +} + +static +bool +pr_fix_ndb_connectstring(Properties& props, proc_rule_ctx& ctx, int) +{ + const char * val; + atrt_cluster& cluster = *ctx.m_cluster; + + if (cluster.m_options.m_features & atrt_options::AO_NDBCLUSTER) + { + if (!cluster.m_options.m_loaded.get(ndbcs, &val)) + { + /** + * Construct connect string for this cluster + */ + BaseString str; + for (size_t i = 0; i<cluster.m_processes.size(); i++) + { + atrt_process* tmp = cluster.m_processes[i]; + if (tmp->m_type == atrt_process::AP_NDB_MGMD) + { + if (str.length()) + { + str.append(";"); + } + const char * port; + require(tmp->m_options.m_loaded.get("--PortNumber=", &port)); + str.appfmt("%s:%s", tmp->m_host->m_hostname.c_str(), port); + } + } + cluster.m_options.m_loaded.put(ndbcs, str.c_str()); + cluster.m_options.m_generated.put(ndbcs, str.c_str()); + cluster.m_options.m_loaded.get(ndbcs, &val); + } + + for (size_t i = 0; i<cluster.m_processes.size(); i++) + { + cluster.m_processes[i]->m_proc.m_env.appfmt(" NDB_CONNECTSTRING=%s", + val); + } + } + return true; +} + +static +bool +pr_set_ndb_connectstring(Properties& props, proc_rule_ctx& ctx, int) +{ + const char * val; + + atrt_process& proc = *ctx.m_process; + if (proc.m_options.m_features & atrt_options::AO_NDBCLUSTER) + { + if (!proc.m_options.m_loaded.get(ndbcs, &val)) + { + require(proc.m_cluster->m_options.m_loaded.get(ndbcs, &val)); + proc.m_options.m_loaded.put(ndbcs, val); + proc.m_options.m_generated.put(ndbcs, val); + } + + if (!proc.m_options.m_loaded.get("--ndbcluster", &val)) + { + proc.m_options.m_loaded.put("--ndbcluster", ""); + proc.m_options.m_generated.put("--ndbcluster", ""); + } + } + return true; +} + +static +bool +pr_check_proc(Properties& props, proc_rule_ctx& ctx, int) +{ + bool ok = true; + bool generated = false; + const int setup = ctx.m_setup; + atrt_process& proc = *ctx.m_process; + for (size_t i = 0; f_options[i].name; i++) + { + proc_option& opt = f_options[i]; + const char * name = opt.name; + if ((ctx.m_process->m_type & opt.type) && + (opt.options == 0 || (ctx.m_process->m_options.m_features & opt.options))) + { + const char * val; + if (!proc.m_options.m_loaded.get(name, &val)) + { + ok = false; + g_logger.warning("Missing parameter: %s for %s", + name, proc.m_proc.m_cwd.c_str()); + } + else if (proc.m_options.m_generated.get(name, &val)) + { + if (setup == 0) + { + ok = false; + g_logger.warning("Missing parameter: %s for %s", + name, proc.m_proc.m_cwd.c_str()); + } + else + { + generated = true; + } + } + } + } + + if (generated) + { + ctx.m_config->m_generated = true; + } + + //ndbout << proc << endl; + + return ok; +} + + +NdbOut& +operator<<(NdbOut& out, const atrt_process& proc) +{ + out << "[ atrt_process: "; + switch(proc.m_type){ + case atrt_process::AP_NDB_MGMD: + out << "ndb_mgmd"; + break; + case atrt_process::AP_NDBD: + out << "ndbd"; + break; + case atrt_process::AP_MYSQLD: + out << "mysqld"; + break; + case 
atrt_process::AP_NDB_API: + out << "ndbapi"; + break; + case atrt_process::AP_CLIENT: + out << "client"; + break; + default: + out << "<unknown: " << (int)proc.m_type << " >"; + } + + out << " cluster: " << proc.m_cluster->m_name.c_str() + << " host: " << proc.m_host->m_hostname.c_str() + << endl << " cwd: " << proc.m_proc.m_cwd.c_str() + << endl << " path: " << proc.m_proc.m_path.c_str() + << endl << " args: " << proc.m_proc.m_args.c_str() + << endl << " env: " << proc.m_proc.m_env.c_str() << endl; + + proc.m_options.m_generated.print(stdout, "generated: "); + + out << " ]"; + +#if 0 + proc.m_index = 0; //idx; + proc.m_host = host_ptr; + proc.m_cluster = cluster; + proc.m_proc.m_id = -1; + proc.m_proc.m_type = "temporary"; + proc.m_proc.m_owner = "atrt"; + proc.m_proc.m_group = cluster->m_name.c_str(); + proc.m_proc.m_cwd.assign(dir).append("/atrt/").append(cluster->m_dir); + proc.m_proc.m_stdout = "log.out"; + proc.m_proc.m_stderr = "2>&1"; + proc.m_proc.m_runas = proc.m_host->m_user; + proc.m_proc.m_ulimit = "c:unlimited"; + proc.m_proc.m_env.assfmt("MYSQL_BASE_DIR=%s", dir); + proc.m_proc.m_shutdown_options = ""; +#endif + + return out; +} + diff --git a/storage/ndb/test/run-test/test-tests.txt b/storage/ndb/test/run-test/test-tests.txt new file mode 100644 index 00000000000..b57023fc0c1 --- /dev/null +++ b/storage/ndb/test/run-test/test-tests.txt @@ -0,0 +1,24 @@ +max-time: 600 +cmd: testBasic +args: -n PkRead T1 + +max-time: 1800 +cmd: testMgm +args: -n SingleUserMode T1 + +# +# +# SYSTEM RESTARTS +# +max-time: 1500 +cmd: testSystemRestart +args: -n SR3 T6 + +max-time: 1500 +cmd: testSystemRestart +args: -n SR4 T6 + +max-time: 600 +cmd: testBasic +args: -n PkRead T1 + diff --git a/storage/ndb/test/run-test/upgrade-boot.sh b/storage/ndb/test/run-test/upgrade-boot.sh new file mode 100644 index 00000000000..d3542166551 --- /dev/null +++ b/storage/ndb/test/run-test/upgrade-boot.sh @@ -0,0 +1,218 @@ +#!/bin/sh +############################################################# +# This script created by Jonas does the following # +# Cleans up clones and pevious builds, pulls new clones, # +# builds, deploys, configures the tests and launches ATRT # +############################################################# + +############### +#Script setup # +############## + +save_args=$* +VERSION="upgrade-boot.sh version 1.00" + +DATE=`date '+%Y-%m-%d'` +HOST=`hostname -s` +export DATE HOST + +set -e + +echo "`date` starting: $*" + +verbose=0 +do_clone=yes +build=yes + +tag0= +tag1= +conf= +extra_args= +extra_clone= +LOCK=$HOME/.autotest-lock + +############################ +# Read command line entries# +############################ + +while [ "$1" ] +do + case "$1" in + --no-clone) do_clone="";; + --no-build) build="";; + --verbose) verbose=`expr $verbose + 1`;; + --clone=*) clone0=`echo $1 | sed s/--clone=//`;; + --clone0=*) clone0=`echo $1 | sed s/--clone0=//`;; + --clone1=*) clone1=`echo $1 | sed s/--clone1=//`;; + --version) echo $VERSION; exit;; + --conf=*) conf=`echo $1 | sed s/--conf=//`;; + --tag=*) tag0=`echo $1 | sed s/--tag=//`;; + --tag0=*) tag0=`echo $1 | sed s/--tag0=//`;; + --tag1=*) tag1=`echo $1 | sed s/--tag1=//`;; + --*) echo "Unknown arg: $1";; + *) RUN=$*;; + esac + shift +done + +if [ -z "$clone1" ] +then + clone1=$clone0 +fi + +if [ -z "$tag0" ] +then + echo "No tag0 specified" + exit +fi + +if [ -z "$tag1" ] +then + echo "No tag1 specified" + exit +fi + +################################# +#Make sure the configfile exists# +#if it does not exit. if it does# +# (.) 
load it # ################################# +if [ -z "$conf" ] +then + if [ -f "`pwd`/autotest.conf" ] + then + conf="`pwd`/autotest.conf" + elif [ -f "$HOME/autotest.conf" ] + then + conf="$HOME/autotest.conf" + fi +fi + +if [ -f $conf ] +then + . $conf +else + echo "Can't find config file: >$conf<" + exit +fi + +############################### +# Validate that all interesting +# variables were set in conf +############################### +vars="src_clone_base install_dir build_dir" +for i in $vars +do + t=`echo echo \\$$i` + if [ -z "`eval $t`" ] + then + echo "Invalid config: $conf, variable $i is not set" + exit + fi +done + +############################### +#Print out the environment vars# +############################### + +if [ $verbose -gt 0 ] +then + env +fi + +#################################### +# Setup the lock file name and path# + # Setup the clone source location # +#################################### + +src_clone0=${src_clone_base}${clone0} +src_clone1=${src_clone_base}${clone1} + +####################################### +# Check to see if the lock file exists# +# If it does, exit. # +####################################### + +if [ -f $LOCK ] +then + echo "Lock file exists: $LOCK" + exit 1 +fi + +####################################### +# If the lock file does not exist then# +# create it with date and run info # +####################################### + +echo "$DATE $RUN" > $LOCK + +############################# +#If any errors here down, we# +# trap them, and remove the # +# Lock file before exit # +############################# +if [ `uname -s` != "SunOS" ] +then + trap "rm -f $LOCK" ERR +fi + +# You can add more to this path# ################################ + +dst_place0=${build_dir}/clone-$tag0-$DATE.$$ +dst_place1=${build_dir}/clone-$tag1-$DATE.$$ + +######################################### +# Delete source and pull down the latest# +######################################### + +if [ "$do_clone" ] +then + rm -rf $dst_place0 $dst_place1 + if [ `echo $src_clone0 | grep -c 'file:\/\/'` = 1 ] + then + bk clone -l -r$tag0 $src_clone0 $dst_place0 + else + bk clone -r$tag0 $src_clone0 $dst_place0 + fi + + if [ `echo $src_clone1 | grep -c 'file:\/\/'` = 1 ] + then + bk clone -l -r$tag1 $src_clone1 $dst_place1 + else + bk clone -r$tag1 $src_clone1 $dst_place1 + fi +fi + +########################################## +# Build the source, make installs, and # +# create the database to be rsynced # +########################################## +install_dir0=$install_dir/$tag0 +install_dir1=$install_dir/$tag1 +if [ "$build" ] +then + cd $dst_place0 + rm -rf $install_dir0 + BUILD/compile-ndb-autotest --prefix=$install_dir0 + make install + + cd $dst_place1 + rm -rf $install_dir1 + BUILD/compile-ndb-autotest --prefix=$install_dir1 + make install +fi + + +################################ +# Start run script # +################################ + +script=$install_dir1/mysql-test/ndb/upgrade-run.sh +$script $save_args --conf=$conf --install-dir=$install_dir --suite=$RUN --nolock $extra_args + +if [ "$build" ] +then + rm -rf $dst_place0 $dst_place1 +fi +rm -f $LOCK diff --git a/storage/ndb/test/src/CpcClient.cpp b/storage/ndb/test/src/CpcClient.cpp index 51f2fb4cf4d..d5dfd01327e 100644 --- a/storage/ndb/test/src/CpcClient.cpp +++ b/storage/ndb/test/src/CpcClient.cpp @@ -428,8 +428,6 @@ SimpleCpcClient::SimpleCpcClient(const char *_host, int _port) { host = strdup(_host); port = _port; cpc_sock = -1; - cpc_in = NULL; - cpc_out = NULL; } SimpleCpcClient::~SimpleCpcClient() { @@ -444,12 
+442,6 @@ SimpleCpcClient::~SimpleCpcClient() { close(cpc_sock); cpc_sock = -1; } - - if(cpc_in != NULL) - delete cpc_in; - - if(cpc_out != NULL) - delete cpc_out; } int @@ -475,17 +467,15 @@ SimpleCpcClient::connect() { if (::connect(cpc_sock, (struct sockaddr*) &sa, sizeof(sa)) < 0) return -1; - cpc_in = new SocketInputStream(cpc_sock, 60000); - cpc_out = new SocketOutputStream(cpc_sock); - return 0; } int SimpleCpcClient::cpc_send(const char *cmd, const Properties &args) { - - cpc_out->println(cmd); + SocketOutputStream cpc_out(cpc_sock); + + cpc_out.println(cmd); Properties::Iterator iter(&args); const char *name; @@ -498,18 +488,18 @@ SimpleCpcClient::cpc_send(const char *cmd, switch(t) { case PropertiesType_Uint32: args.get(name, &val_i); - cpc_out->println("%s: %d", name, val_i); + cpc_out.println("%s: %d", name, val_i); break; case PropertiesType_char: args.get(name, val_s); - cpc_out->println("%s: %s", name, val_s.c_str()); + cpc_out.println("%s: %s", name, val_s.c_str()); break; default: /* Silently ignore */ break; } } - cpc_out->println(""); + cpc_out.println(""); return 0; } @@ -523,9 +513,11 @@ SimpleCpcClient::Parser_t::ParserStatus SimpleCpcClient::cpc_recv(const ParserRow_t *syntax, const Properties **reply, void **user_value) { + SocketInputStream cpc_in(cpc_sock); + Parser_t::Context ctx; ParserDummy session(cpc_sock); - Parser_t parser(syntax, *cpc_in, true, true, true); + Parser_t parser(syntax, cpc_in, true, true, true); *reply = parser.parse(ctx, session); if(user_value != NULL) *user_value = ctx.m_currentCmd->user_value; diff --git a/storage/ndb/test/src/HugoOperations.cpp b/storage/ndb/test/src/HugoOperations.cpp index a8aa52aaf99..93a9eaf435a 100644 --- a/storage/ndb/test/src/HugoOperations.cpp +++ b/storage/ndb/test/src/HugoOperations.cpp @@ -93,6 +93,7 @@ rand_lock_mode: case NdbOperation::LM_Read: case NdbOperation::LM_Exclusive: case NdbOperation::LM_CommittedRead: + case NdbOperation::LM_SimpleRead: if(idx && idx->getType() == NdbDictionary::Index::OrderedIndex && pIndexScanOp == 0) { @@ -301,8 +302,8 @@ int HugoOperations::execute_Commit(Ndb* pNdb, int check = 0; check = pTrans->execute(Commit, eao); - if( check == -1 ) { - const NdbError err = pTrans->getNdbError(); + const NdbError err = pTrans->getNdbError(); + if( check == -1 || err.code) { ERR(err); NdbOperation* pOp = pTrans->getNdbErrorOperation(); if (pOp != NULL){ @@ -350,13 +351,16 @@ int HugoOperations::execute_NoCommit(Ndb* pNdb, AbortOption eao){ int check; check = pTrans->execute(NoCommit, eao); - if( check == -1 ) { - const NdbError err = pTrans->getNdbError(); + const NdbError err = pTrans->getNdbError(); + if( check == -1 || err.code) { ERR(err); - NdbOperation* pOp; - while ((pOp = pTrans->getNdbErrorOperation()) != NULL){ + const NdbOperation* pOp = pTrans->getNdbErrorOperation(); + while (pOp != NULL) + { const NdbError err2 = pOp->getNdbError(); - ERR(err2); + if (err2.code) + ERR(err2); + pOp = pTrans->getNextCompletedOperation(pOp); } if (err.code == 0) return NDBT_FAILED; @@ -428,7 +432,7 @@ HugoOperations::callback(int res, NdbTransaction* pCon) int HugoOperations::execute_async(Ndb* pNdb, NdbTransaction::ExecType et, - NdbTransaction::AbortOption eao){ + NdbOperation::AbortOption eao){ m_async_reply= 0; pTrans->executeAsynchPrepare(et, @@ -443,7 +447,7 @@ HugoOperations::execute_async(Ndb* pNdb, NdbTransaction::ExecType et, int HugoOperations::execute_async_prepare(Ndb* pNdb, NdbTransaction::ExecType et, - NdbTransaction::AbortOption eao){ + NdbOperation::AbortOption eao){ 
m_async_reply= 0; pTrans->executeAsynchPrepare(et, diff --git a/storage/ndb/test/src/HugoTransactions.cpp b/storage/ndb/test/src/HugoTransactions.cpp index d4d5652bf1f..0e5f7cd8115 100644 --- a/storage/ndb/test/src/HugoTransactions.cpp +++ b/storage/ndb/test/src/HugoTransactions.cpp @@ -99,7 +99,7 @@ HugoTransactions::scanReadRecords(Ndb* pNdb, } } - check = pTrans->execute(NoCommit); + check = pTrans->execute(NoCommit, AbortOnError); if( check == -1 ) { const NdbError err = pTrans->getNdbError(); if (err.status == NdbError::TemporaryError){ @@ -250,7 +250,7 @@ HugoTransactions::scanReadRecords(Ndb* pNdb, } } - check = pTrans->execute(NoCommit); + check = pTrans->execute(NoCommit, AbortOnError); if( check == -1 ) { const NdbError err = pTrans->getNdbError(); if (err.status == NdbError::TemporaryError){ @@ -391,18 +391,21 @@ restart: } } - check = pTrans->execute(NoCommit); + check = pTrans->execute(NoCommit, AbortOnError); if( check == -1 ) { const NdbError err = pTrans->getNdbError(); - ERR(err); - closeTransaction(pNdb); if (err.status == NdbError::TemporaryError){ + ERR(err); + closeTransaction(pNdb); NdbSleep_MilliSleep(50); + retryAttempt++; continue; } + ERR(err); + closeTransaction(pNdb); return NDBT_FAILED; } - + // Abort after 1-100 or 1-records rows int ranVal = rand(); int abortCount = ranVal % (records == 0 ? 100 : records); @@ -413,69 +416,62 @@ restart: abortTrans = true; } + int eof; int rows = 0; - while((check = pOp->nextResult(true)) == 0){ - do { - rows++; - NdbOperation* pUp = pOp->updateCurrentTuple(); - if(pUp == 0){ + while((eof = pOp->nextResult(true)) == 0){ + rows++; + if (calc.verifyRowValues(&row) != 0){ + closeTransaction(pNdb); + return NDBT_FAILED; + } + + if (abortCount == rows && abortTrans == true){ + ndbout << "Scan is aborted" << endl; + g_info << "Scan is aborted" << endl; + pOp->close(); + if( check == -1 ) { ERR(pTrans->getNdbError()); closeTransaction(pNdb); return NDBT_FAILED; } - const int updates = calc.getUpdatesValue(&row) + 1; - const int r = calc.getIdValue(&row); - for(a = 0; a<tab.getNoOfColumns(); a++){ - if (tab.getColumn(a)->getPrimaryKey() == false){ - if(setValueForAttr(pUp, a, r, updates ) != 0){ - ERR(pTrans->getNdbError()); - closeTransaction(pNdb); - return NDBT_FAILED; - } - } - } - - if (rows == abortCount && abortTrans == true){ - g_info << "Scan is aborted" << endl; - // This scan should be aborted - closeTransaction(pNdb); - return NDBT_OK; - } - } while((check = pOp->nextResult(false)) == 0); - - if(check != -1){ - check = pTrans->execute(Commit); - if(check != -1) - m_latest_gci = pTrans->getGCI(); - pTrans->restart(); - } - - const NdbError err = pTrans->getNdbError(); - if( check == -1 ) { + closeTransaction(pNdb); - ERR(err); - if (err.status == NdbError::TemporaryError){ - NdbSleep_MilliSleep(50); - goto restart; - } - return NDBT_FAILED; + return NDBT_OK; } } - - const NdbError err = pTrans->getNdbError(); - if( check == -1 ) { - closeTransaction(pNdb); - ERR(err); + if (eof == -1) { + const NdbError err = pTrans->getNdbError(); + if (err.status == NdbError::TemporaryError){ + ERR_INFO(err); + closeTransaction(pNdb); NdbSleep_MilliSleep(50); - goto restart; + switch (err.code){ + case 488: + case 245: + case 490: + // Too many active scans, no limit on number of retry attempts + break; + default: + retryAttempt++; + } + continue; } + ERR(err); + closeTransaction(pNdb); return NDBT_FAILED; } - + closeTransaction(pNdb); + + g_info << rows << " rows have been read" << endl; + if (records != 0 && rows != records){ + g_err << 
"Check expected number of records failed" << endl + << " expected=" << records <<", " << endl + << " read=" << rows << endl; + return NDBT_FAILED; + } - g_info << rows << " rows have been updated" << endl; return NDBT_OK; } return NDBT_FAILED; @@ -618,14 +614,14 @@ HugoTransactions::loadTableStartFrom(Ndb* pNdb, closeTrans = false; if (!abort) { - check = pTrans->execute( Commit ); + check = pTrans->execute(Commit, AbortOnError); if(check != -1) m_latest_gci = pTrans->getGCI(); pTrans->restart(); } else { - check = pTrans->execute( NoCommit ); + check = pTrans->execute(NoCommit, AbortOnError); if (check != -1) { check = pTrans->execute( Rollback ); @@ -634,7 +630,7 @@ HugoTransactions::loadTableStartFrom(Ndb* pNdb, } } else { closeTrans = false; - check = pTrans->execute( NoCommit ); + check = pTrans->execute(NoCommit, AbortOnError); } if(check == -1 ) { const NdbError err = pTrans->getNdbError(); @@ -755,7 +751,7 @@ HugoTransactions::fillTableStartFrom(Ndb* pNdb, } // Execute the transaction and insert the record - check = pTrans->execute( Commit, CommitAsMuchAsPossible ); + check = pTrans->execute(Commit, CommitAsMuchAsPossible); if(check == -1 ) { const NdbError err = pTrans->getNdbError(); closeTransaction(pNdb); @@ -877,7 +873,7 @@ HugoTransactions::pkReadRecords(Ndb* pNdb, return NDBT_FAILED; } - check = pTrans->execute(Commit); + check = pTrans->execute(Commit, AbortOnError); if( check == -1 ) { const NdbError err = pTrans->getNdbError(); @@ -1011,7 +1007,7 @@ HugoTransactions::pkUpdateRecords(Ndb* pNdb, return NDBT_FAILED; } - check = pTrans->execute(NoCommit); + check = pTrans->execute(NoCommit, AbortOnError); if( check == -1 ) { const NdbError err = pTrans->getNdbError(); @@ -1062,7 +1058,7 @@ HugoTransactions::pkUpdateRecords(Ndb* pNdb, if(check != 2) break; - if((check = pTrans->execute(NoCommit)) != 0) + if((check = pTrans->execute(NoCommit, AbortOnError)) != 0) break; } if(check != 1 || rows_found != batch) @@ -1090,7 +1086,7 @@ HugoTransactions::pkUpdateRecords(Ndb* pNdb, return NDBT_FAILED; } } - check = pTrans->execute(Commit); + check = pTrans->execute(Commit, AbortOnError); } if( check == -1 ) { const NdbError err = pTrans->getNdbError(); @@ -1193,7 +1189,7 @@ HugoTransactions::pkInterpretedUpdateRecords(Ndb* pNdb, } } - check = pTrans->execute(NoCommit); + check = pTrans->execute(NoCommit, AbortOnError); if( check == -1 ) { const NdbError err = pTrans->getNdbError(); @@ -1227,7 +1223,7 @@ HugoTransactions::pkInterpretedUpdateRecords(Ndb* pNdb, } // PKs - if (equalForRow(pOp, r) != 0) + if (equalForRow(pUpdOp, r) != 0) { closeTransaction(pNdb); return NDBT_FAILED; @@ -1264,7 +1260,7 @@ HugoTransactions::pkInterpretedUpdateRecords(Ndb* pNdb, - check = pTrans->execute(Commit); + check = pTrans->execute(Commit, AbortOnError); if( check == -1 ) { const NdbError err = pTrans->getNdbError(); @@ -1361,7 +1357,7 @@ HugoTransactions::pkDelRecords(Ndb* pNdb, return NDBT_FAILED; } - check = pTrans->execute(Commit); + check = pTrans->execute(Commit, AbortOnError); if( check == -1) { const NdbError err = pTrans->getNdbError(); @@ -1479,7 +1475,7 @@ HugoTransactions::lockRecords(Ndb* pNdb, int lockCount = lockTime / sleepInterval; int commitCount = 0; do { - check = pTrans->execute(NoCommit); + check = pTrans->execute(NoCommit, AbortOnError); if( check == -1) { const NdbError err = pTrans->getNdbError(); @@ -1505,7 +1501,7 @@ HugoTransactions::lockRecords(Ndb* pNdb, } while (commitCount < lockCount); // Really commit the trans, puuh! 
- check = pTrans->execute(Commit); + check = pTrans->execute(Commit, AbortOnError); if( check == -1) { const NdbError err = pTrans->getNdbError(); @@ -1631,7 +1627,7 @@ HugoTransactions::indexReadRecords(Ndb* pNdb, } } - check = pTrans->execute(Commit); + check = pTrans->execute(Commit, AbortOnError); check = (check == -1 ? -1 : !ordered ? check : sOp->nextResult(true)); if( check == -1 ) { const NdbError err = pTrans->getNdbError(); @@ -1768,7 +1764,7 @@ HugoTransactions::indexUpdateRecords(Ndb* pNdb, } } - check = pTrans->execute(NoCommit); + check = pTrans->execute(NoCommit, AbortOnError); check = (check == -1 ? -1 : !ordered ? check : sOp->nextResult(true)); if( check == -1 ) { const NdbError err = pTrans->getNdbError(); @@ -1819,7 +1815,7 @@ HugoTransactions::indexUpdateRecords(Ndb* pNdb, if(!ordered) { - if (equalForRow(pOp, r+b) != 0) + if (equalForRow(pUpdOp, r+b) != 0) { closeTransaction(pNdb); return NDBT_FAILED; @@ -1837,7 +1833,7 @@ HugoTransactions::indexUpdateRecords(Ndb* pNdb, } } - check = pTrans->execute(Commit); + check = pTrans->execute(Commit, AbortOnError); if( check == -1 ) { const NdbError err = pTrans->getNdbError(); ERR(err); diff --git a/storage/ndb/test/src/Makefile.am b/storage/ndb/test/src/Makefile.am index 04bccef0b2f..a025579cb72 100644 --- a/storage/ndb/test/src/Makefile.am +++ b/storage/ndb/test/src/Makefile.am @@ -24,7 +24,7 @@ libNDBT_a_SOURCES = \ NdbRestarter.cpp NdbRestarts.cpp NDBT_Output.cpp \ NdbBackup.cpp NdbConfig.cpp NdbGrep.cpp NDBT_Table.cpp \ NdbSchemaCon.cpp NdbSchemaOp.cpp getarg.c \ - CpcClient.cpp NDBT_Thread.cpp + CpcClient.cpp NdbMixRestarter.cpp NDBT_Thread.cpp INCLUDES_LOC = -I$(top_srcdir)/storage/ndb/src/common/mgmcommon -I$(top_srcdir)/storage/ndb/include/mgmcommon -I$(top_srcdir)/storage/ndb/include/kernel -I$(top_srcdir)/storage/ndb/src/mgmapi diff --git a/storage/ndb/test/src/NDBT_Table.cpp b/storage/ndb/test/src/NDBT_Table.cpp index 74940396422..1787bef9aba 100644 --- a/storage/ndb/test/src/NDBT_Table.cpp +++ b/storage/ndb/test/src/NDBT_Table.cpp @@ -33,9 +33,9 @@ operator <<(class NdbOut& ndbout, const NDBT_Table & tab) ndbout << "Length of frm data: " << tab.getFrmLength() << endl; ndbout << "Row Checksum: " << tab.getRowChecksumIndicator() << endl; ndbout << "Row GCI: " << tab.getRowGCIIndicator() << endl; + ndbout << "SingleUserMode: " << (Uint32) tab.getSingleUserMode() << endl; ndbout << "ForceVarPart: " << tab.getForceVarPart() << endl; - //<< ((tab.getTupleKey() == TupleId) ? 
" tupleid" : "") <<endl; ndbout << "TableStatus: "; switch(tab.getObjectStatus()){ diff --git a/storage/ndb/test/src/NDBT_Test.cpp b/storage/ndb/test/src/NDBT_Test.cpp index 9c908ab27c6..69f3723ca75 100644 --- a/storage/ndb/test/src/NDBT_Test.cpp +++ b/storage/ndb/test/src/NDBT_Test.cpp @@ -146,8 +146,10 @@ void NDBT_Context::setProperty(const char* _name, Uint32 _val){ NdbMutex_Lock(propertyMutexPtr); const bool b = props.put(_name, _val, true); assert(b == true); + NdbCondition_Broadcast(propertyCondPtr); NdbMutex_Unlock(propertyMutexPtr); } + void NDBT_Context::decProperty(const char * name){ NdbMutex_Lock(propertyMutexPtr); @@ -159,6 +161,7 @@ NDBT_Context::decProperty(const char * name){ NdbCondition_Broadcast(propertyCondPtr); NdbMutex_Unlock(propertyMutexPtr); } + void NDBT_Context::incProperty(const char * name){ NdbMutex_Lock(propertyMutexPtr); @@ -173,6 +176,7 @@ void NDBT_Context::setProperty(const char* _name, const char* _val){ NdbMutex_Lock(propertyMutexPtr); const bool b = props.put(_name, _val); assert(b == true); + NdbCondition_Broadcast(propertyCondPtr); NdbMutex_Unlock(propertyMutexPtr); } @@ -947,6 +951,63 @@ NDBT_TestSuite::executeOne(Ndb_cluster_connection& con, } } +int +NDBT_TestSuite::executeOneCtx(Ndb_cluster_connection& con, + const NdbDictionary::Table *ptab, const char* _testname){ + + testSuiteTimer.doStart(); + + do{ + if(tests.size() == 0) + break; + + Ndb ndb(&con, "TEST_DB"); + ndb.init(1024); + + int result = ndb.waitUntilReady(300); // 5 minutes + if (result != 0){ + g_err << name <<": Ndb was not ready" << endl; + break; + } + + ndbout << name << " started [" << getDate() << "]" << endl; + ndbout << "|- " << ptab->getName() << endl; + + for (unsigned t = 0; t < tests.size(); t++){ + + if (_testname != NULL && + strcasecmp(tests[t]->getName(), _testname) != 0) + continue; + + tests[t]->initBeforeTest(); + + ctx = new NDBT_Context(con); + ctx->setTab(ptab); + ctx->setNumRecords(records); + ctx->setNumLoops(loops); + if(remote_mgm != NULL) + ctx->setRemoteMgm(remote_mgm); + ctx->setSuite(this); + + result = tests[t]->execute(ctx); + if (result != NDBT_OK) + numTestsFail++; + else + numTestsOk++; + numTestsExecuted++; + + delete ctx; + } + + if (numTestsFail > 0) + break; + }while(0); + + testSuiteTimer.doStop(); + int res = report(_testname); + return NDBT_ProgramExit(res); +} + int NDBT_TestSuite::createHook(Ndb* ndb, NdbDictionary::Table& tab, int when) { @@ -1134,35 +1195,35 @@ static struct my_option my_long_options[] = { NDB_STD_OPTS(""), { "print", OPT_PRINT, "Print execution tree", - (gptr*) &opt_print, (gptr*) &opt_print, 0, + (uchar **) &opt_print, (uchar **) &opt_print, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "print_html", OPT_PRINT_HTML, "Print execution tree in html table format", - (gptr*) &opt_print_html, (gptr*) &opt_print_html, 0, + (uchar **) &opt_print_html, (uchar **) &opt_print_html, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "print_cases", OPT_PRINT_CASES, "Print list of test cases", - (gptr*) &opt_print_cases, (gptr*) &opt_print_cases, 0, + (uchar **) &opt_print_cases, (uchar **) &opt_print_cases, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "records", 'r', "Number of records", - (gptr*) &opt_records, (gptr*) &opt_records, 0, + (uchar **) &opt_records, (uchar **) &opt_records, 0, GET_INT, REQUIRED_ARG, 1000, 0, 0, 0, 0, 0 }, { "loops", 'l', "Number of loops", - (gptr*) &opt_loops, (gptr*) &opt_loops, 0, + (uchar **) &opt_loops, (uchar **) &opt_loops, 0, GET_INT, REQUIRED_ARG, 5, 0, 0, 0, 0, 0 }, { "seed", 1024, "Random seed", - 
(gptr*) &opt_seed, (gptr*) &opt_seed, 0, + (uchar **) &opt_seed, (uchar **) &opt_seed, 0, GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { "testname", 'n', "Name of test to run", - (gptr*) &opt_testname, (gptr*) &opt_testname, 0, + (uchar **) &opt_testname, (uchar **) &opt_testname, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { "remote_mgm", 'm', "host:port to mgmsrv of remote cluster", - (gptr*) &opt_remote_mgm, (gptr*) &opt_remote_mgm, 0, + (uchar **) &opt_remote_mgm, (uchar **) &opt_remote_mgm, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { "timer", 't', "Print execution time", - (gptr*) &opt_timer, (gptr*) &opt_timer, 0, + (uchar **) &opt_timer, (uchar **) &opt_timer, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "verbose", 'v', "Print verbose status", - (gptr*) &opt_verbose, (gptr*) &opt_verbose, 0, + (uchar **) &opt_verbose, (uchar **) &opt_verbose, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} }; diff --git a/storage/ndb/test/src/NdbMixRestarter.cpp b/storage/ndb/test/src/NdbMixRestarter.cpp new file mode 100644 index 00000000000..81631afaa1a --- /dev/null +++ b/storage/ndb/test/src/NdbMixRestarter.cpp @@ -0,0 +1,312 @@ +/* Copyright (C) 2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#include "NdbMixRestarter.hpp" + +NdbMixRestarter::NdbMixRestarter(const char* _addr) : + NdbRestarter(_addr), + m_mask(~(Uint32)0) +{ +} + +NdbMixRestarter::~NdbMixRestarter() +{ + +} + +#define CHECK(b) if (!(b)) { \ + ndbout << "ERR: "<< step->getName() \ + << " failed on line " << __LINE__ << endl; \ + result = NDBT_FAILED; \ + continue; } + +int +NdbMixRestarter::restart_cluster(NDBT_Context* ctx, + NDBT_Step* step, + bool stopabort) +{ + int timeout = 180; + int result = NDBT_OK; + + do + { + ctx->setProperty(NMR_SR_THREADS_STOPPED, (Uint32)0); + ctx->setProperty(NMR_SR_VALIDATE_THREADS_DONE, (Uint32)0); + + ndbout << " -- Shutting down " << endl; + ctx->setProperty(NMR_SR, NdbMixRestarter::SR_STOPPING); + CHECK(restartAll(false, true, stopabort) == 0); + ctx->setProperty(NMR_SR, NdbMixRestarter::SR_STOPPED); + CHECK(waitClusterNoStart(timeout) == 0); + + Uint32 cnt = ctx->getProperty(NMR_SR_THREADS); + Uint32 curr= ctx->getProperty(NMR_SR_THREADS_STOPPED); + while(curr != cnt && !ctx->isTestStopped()) + { + if (curr > cnt) + { + ndbout_c("stopping: curr: %d cnt: %d", curr, cnt); + abort(); + } + + NdbSleep_MilliSleep(100); + curr= ctx->getProperty(NMR_SR_THREADS_STOPPED); + } + + CHECK(ctx->isTestStopped() == false); + CHECK(startAll() == 0); + CHECK(waitClusterStarted(timeout) == 0); + + cnt = ctx->getProperty(NMR_SR_VALIDATE_THREADS); + if (cnt) + { + ndbout << " -- Validating starts " << endl; + ctx->setProperty(NMR_SR_VALIDATE_THREADS_DONE, (Uint32)0); + ctx->setProperty(NMR_SR, NdbMixRestarter::SR_VALIDATING); + curr = ctx->getProperty(NMR_SR_VALIDATE_THREADS_DONE); + while (curr != cnt && !ctx->isTestStopped()) + { + if (curr > 
cnt)
+ {
+ ndbout_c("validating: curr: %d cnt: %d", curr, cnt);
+ abort();
+ }
+
+ NdbSleep_MilliSleep(100);
+ curr = ctx->getProperty(NMR_SR_VALIDATE_THREADS_DONE);
+ }
+ ndbout << " -- Validating complete " << endl;
+ }
+ CHECK(ctx->isTestStopped() == false);
+ ctx->setProperty(NMR_SR, NdbMixRestarter::SR_RUNNING);
+
+ } while(0);
+
+ return result;
+}
+
+static
+ndb_mgm_node_state*
+select_node_to_stop(Vector<ndb_mgm_node_state>& nodes)
+{
+ Uint32 i, j;
+ Vector<ndb_mgm_node_state*> alive_nodes;
+ for(i = 0; i<nodes.size(); i++)
+ {
+ ndb_mgm_node_state* node = &nodes[i];
+ if (node->node_status == NDB_MGM_NODE_STATUS_STARTED)
+ alive_nodes.push_back(node);
+ }
+
+ Vector<ndb_mgm_node_state*> victims;
+ // Remove those with one in node group
+ for(i = 0; i<alive_nodes.size(); i++)
+ {
+ int group = alive_nodes[i]->node_group;
+ for(j = 0; j<alive_nodes.size(); j++)
+ {
+ if (i != j && alive_nodes[j]->node_group == group)
+ {
+ victims.push_back(alive_nodes[i]);
+ break;
+ }
+ }
+ }
+
+ if (victims.size())
+ {
+ int victim = rand() % victims.size();
+ return victims[victim];
+ }
+ else
+ {
+ return 0;
+ }
+}
+
+static
+ndb_mgm_node_state*
+select_node_to_start(Vector<ndb_mgm_node_state>& nodes)
+{
+ Uint32 i;
+ Vector<ndb_mgm_node_state*> victims;
+ for(i = 0; i<nodes.size(); i++)
+ {
+ ndb_mgm_node_state* node = &nodes[i];
+ if (node->node_status == NDB_MGM_NODE_STATUS_NOT_STARTED)
+ victims.push_back(node);
+ }
+
+ if (victims.size())
+ {
+ int victim = rand() % victims.size();
+ return victims[victim];
+ }
+ else
+ {
+ return 0;
+ }
+}
+
+void
+NdbMixRestarter::setRestartTypeMask(Uint32 mask)
+{
+ m_mask = mask;
+}
+
+int
+NdbMixRestarter::runUntilStopped(NDBT_Context* ctx,
+ NDBT_Step* step,
+ Uint32 freq)
+{
+ if (init(ctx, step))
+ return NDBT_FAILED;
+
+ while (!ctx->isTestStopped())
+ {
+ if (dostep(ctx, step))
+ return NDBT_FAILED;
+ NdbSleep_SecSleep(freq);
+ }
+
+ if (!finish(ctx, step))
+ return NDBT_FAILED;
+
+ return NDBT_OK;
+}
+
+int
+NdbMixRestarter::runPeriod(NDBT_Context* ctx,
+ NDBT_Step* step,
+ Uint32 period, Uint32 freq)
+{
+ if (init(ctx, step))
+ return NDBT_FAILED;
+
+ Uint32 stop = time(0) + period;
+ while (!ctx->isTestStopped() && (time(0) < stop))
+ {
+ if (dostep(ctx, step))
+ {
+ return NDBT_FAILED;
+ }
+ NdbSleep_SecSleep(freq);
+ }
+
+ if (finish(ctx, step))
+ {
+ return NDBT_FAILED;
+ }
+
+ ctx->stopTest();
+ return NDBT_OK;
+}
+
+int
+NdbMixRestarter::init(NDBT_Context* ctx, NDBT_Step* step)
+{
+ waitClusterStarted();
+ m_nodes = ndbNodes;
+ return 0;
+}
+
+int
+NdbMixRestarter::dostep(NDBT_Context* ctx, NDBT_Step* step)
+{
+ ndb_mgm_node_state* node = 0;
+ int action;
+loop:
+ while(((action = (1 << (rand() % RTM_COUNT))) & m_mask) == 0);
+ switch(action){
+ case RTM_RestartCluster:
+ if (restart_cluster(ctx, step))
+ return NDBT_FAILED;
+ for (Uint32 i = 0; i<m_nodes.size(); i++)
+ m_nodes[i].node_status = NDB_MGM_NODE_STATUS_STARTED;
+ break;
+ case RTM_RestartNode:
+ case RTM_RestartNodeInitial:
+ case RTM_StopNode:
+ case RTM_StopNodeInitial:
+ {
+ if ((node = select_node_to_stop(m_nodes)) == 0)
+ goto loop;
+
+ if (action == RTM_RestartNode || action == RTM_RestartNodeInitial)
+ ndbout << "Restarting " << node->node_id;
+ else
+ ndbout << "Stopping " << node->node_id;
+
+ bool initial =
+ action == RTM_RestartNodeInitial || action == RTM_StopNodeInitial;
+
+ if (initial)
+ ndbout << " initial";
+ ndbout << endl;
+
+ if (restartOneDbNode(node->node_id, initial, true, true))
+ return NDBT_FAILED;
+
+ if (waitNodesNoStart(&node->node_id, 1))
+ return NDBT_FAILED;
+
+ node->node_status = NDB_MGM_NODE_STATUS_NOT_STARTED;
+
+ if (action == RTM_StopNode || action == RTM_StopNodeInitial)
+ break;
+ else
+ goto start;
+ }
+ case RTM_StartNode:
+ if ((node = select_node_to_start(m_nodes)) == 0)
+ goto loop;
+start:
+ ndbout << "Starting " << node->node_id << endl;
+ if (startNodes(&node->node_id, 1))
+ return NDBT_FAILED;
+ if (waitNodesStarted(&node->node_id, 1))
+ return NDBT_FAILED;
+
+ node->node_status = NDB_MGM_NODE_STATUS_STARTED;
+ break;
+ }
+ return NDBT_OK;
+}
+
+int
+NdbMixRestarter::finish(NDBT_Context* ctx, NDBT_Step* step)
+{
+ Vector<int> not_started;
+ {
+ ndb_mgm_node_state* node = 0;
+ while((node = select_node_to_start(m_nodes)))
+ {
+ not_started.push_back(node->node_id);
+ node->node_status = NDB_MGM_NODE_STATUS_STARTED;
+ }
+ }
+
+ if (not_started.size())
+ {
+ ndbout << "Starting stopped nodes " << endl;
+ if (startNodes(not_started.getBase(), not_started.size()))
+ return NDBT_FAILED;
+ if (waitClusterStarted())
+ return NDBT_FAILED;
+ }
+ return NDBT_OK;
+}
+
+template class Vector<ndb_mgm_node_state*>;
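For orientation, a sketch of how a test step might drive this new helper. The RTM_* enumerators are used unqualified inside dostep() above and are assumed to be public members declared in NdbMixRestarter.hpp (not shown in this diff); the management-server address is a placeholder:

  // Hypothetical NDBT step: mix random node restarts and starts for
  // 5 minutes, taking one action roughly every 10 seconds.
  int runMixRestart(NDBT_Context* ctx, NDBT_Step* step)
  {
    NdbMixRestarter restarter("localhost:1186");  // placeholder mgmd address
    restarter.setRestartTypeMask((1 << NdbMixRestarter::RTM_RestartNode) |
                                 (1 << NdbMixRestarter::RTM_StartNode));
    return restarter.runPeriod(ctx, step, 300, 10);  // period=300s, freq=10s
  }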
diff --git a/storage/ndb/test/src/NdbRestarter.cpp b/storage/ndb/test/src/NdbRestarter.cpp
index 299517b32d3..1acd28dab23 100644
--- a/storage/ndb/test/src/NdbRestarter.cpp
+++ b/storage/ndb/test/src/NdbRestarter.cpp
@@ -26,6 +26,8 @@
 #define MGMERR(h) \
 ndbout << "latest_error="<<ndb_mgm_get_latest_error(h) \
 << ", line="<<ndb_mgm_get_latest_error_line(h) \
+ << ", mesg="<<ndb_mgm_get_latest_error_msg(h) \
+ << ", desc="<<ndb_mgm_get_latest_error_desc(h) \
 << endl;
diff --git a/storage/ndb/test/src/UtilTransactions.cpp b/storage/ndb/test/src/UtilTransactions.cpp
index 41c3d2f1d83..776ffd176b3 100644
--- a/storage/ndb/test/src/UtilTransactions.cpp
+++ b/storage/ndb/test/src/UtilTransactions.cpp
@@ -92,7 +92,7 @@ UtilTransactions::clearTable(Ndb* pNdb,
 goto failed;
 }
- if(pTrans->execute(NoCommit) != 0){
+ if(pTrans->execute(NoCommit, AbortOnError) != 0){
 err = pTrans->getNdbError();
 if(err.status == NdbError::TemporaryError){
 ERR(err);
@@ -112,7 +112,7 @@
 }
 } while((check = pOp->nextResult(false)) == 0);
 if(check != -1){
- check = pTrans->execute(Commit);
+ check = pTrans->execute(Commit, AbortOnError);
 pTrans->restart();
 }
@@ -253,7 +253,7 @@ UtilTransactions::copyTableData(Ndb* pNdb,
 }
 }
- check = pTrans->execute(NoCommit);
+ check = pTrans->execute(NoCommit, AbortOnError);
 if( check == -1 ) {
 ERR(pTrans->getNdbError());
 closeTransaction(pNdb);
@@ -270,7 +270,7 @@
 }
 } while((eof = pOp->nextResult(false)) == 0);
- check = pTrans->execute(Commit);
+ check = pTrans->execute(Commit, AbortOnError);
 pTrans->restart();
 if( check == -1 ) {
 const NdbError err = pTrans->getNdbError();
@@ -422,7 +422,7 @@ UtilTransactions::scanReadRecords(Ndb* pNdb,
 }
 // *************************************************
- check = pTrans->execute(NoCommit);
+ check = pTrans->execute(NoCommit, AbortOnError);
 if( check == -1 ) {
 const NdbError err = pTrans->getNdbError();
@@ -528,7 +528,7 @@ UtilTransactions::selectCount(Ndb* pNdb,
 }
- check = pTrans->execute(NoCommit);
+ check = pTrans->execute(NoCommit, AbortOnError);
 if( check == -1 ) {
 ERR(pTrans->getNdbError());
 closeTransaction(pNdb);
@@ -701,7 +701,7 @@ restart:
 }
 }
- check = pTrans->execute(NoCommit);
+ check = pTrans->execute(NoCommit, AbortOnError);
 if( check == -1 ) {
 const NdbError err = pTrans->getNdbError();
@@ -964,7 +964,7 @@ UtilTransactions::readRowFromTableAndIndex(Ndb* pNdb,
 printf("\n");
#endif scanTrans->refresh(); - check = pTrans1->execute(Commit); + check = pTrans1->execute(Commit, AbortOnError); if( check == -1 ) { const NdbError err = pTrans1->getNdbError(); @@ -1086,7 +1086,7 @@ UtilTransactions::verifyOrderedIndex(Ndb* pNdb, abort(); } - check = pTrans->execute(NoCommit); + check = pTrans->execute(NoCommit, AbortOnError); if( check == -1 ) { const NdbError err = pTrans->getNdbError(); @@ -1145,7 +1145,7 @@ UtilTransactions::verifyOrderedIndex(Ndb* pNdb, goto error; } - check = pTrans->execute(NoCommit); + check = pTrans->execute(NoCommit, AbortOnError); if(check) goto error; @@ -1384,7 +1384,7 @@ loop: } } - if( pTrans->execute(NoCommit) == -1 ) { + if( pTrans->execute(NoCommit, AbortOnError) == -1 ) { ERR(err= pTrans->getNdbError()); goto error; } @@ -1407,7 +1407,8 @@ loop: ERR(err= cmp.getTransaction()->getNdbError()); goto error; } - if(cmp.execute_Commit(pNdb) != NDBT_OK) + if(cmp.execute_Commit(pNdb) != NDBT_OK || + cmp.getTransaction()->getNdbError().code) { ERR(err= cmp.getTransaction()->getNdbError()); goto error; diff --git a/storage/ndb/test/tools/Makefile.am b/storage/ndb/test/tools/Makefile.am index 91cc1a4796c..1683d4d84ae 100644 --- a/storage/ndb/test/tools/Makefile.am +++ b/storage/ndb/test/tools/Makefile.am @@ -13,7 +13,7 @@ # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -ndbtest_PROGRAMS = hugoLoad hugoFill hugoLockRecords hugoPkDelete hugoPkRead hugoPkReadRecord hugoPkUpdate hugoScanRead hugoScanUpdate restart verify_index copy_tab create_index ndb_cpcc listen_event rep_latency eventlog +ndbtest_PROGRAMS = hugoLoad hugoFill hugoLockRecords hugoPkDelete hugoPkRead hugoPkReadRecord hugoPkUpdate hugoScanRead hugoScanUpdate restart verify_index copy_tab create_index ndb_cpcc listen_event eventlog rep_latency # transproxy @@ -40,6 +40,7 @@ include $(top_srcdir)/storage/ndb/config/common.mk.am include $(top_srcdir)/storage/ndb/config/type_ndbapitest.mk.am ndb_cpcc_LDADD = $(LDADD) +ndb_cpcc_LDFLAGS = -static # Don't update the files from bitkeeper %::SCCS/s.% diff --git a/storage/ndb/test/tools/listen.cpp b/storage/ndb/test/tools/listen.cpp index 97c307e9c15..e51b213195b 100644 --- a/storage/ndb/test/tools/listen.cpp +++ b/storage/ndb/test/tools/listen.cpp @@ -357,6 +357,9 @@ main(int argc, const char** argv){ } } end: + for(i= 0; i<(int)event_ops.size(); i++) + MyNdb.dropEventOperation(event_ops[i]); + if (ndb2) delete ndb2; if (con2) diff --git a/storage/ndb/tools/Makefile.am b/storage/ndb/tools/Makefile.am index 3d0c6f79146..de0f36963bf 100644 --- a/storage/ndb/tools/Makefile.am +++ b/storage/ndb/tools/Makefile.am @@ -14,7 +14,7 @@ # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA dist_bin_SCRIPTS = ndb_size.pl ndb_error_reporter -dist_pkgdata_DATA = ndb_size.tmpl +dist_pkgdata_DATA = ndbtools_PROGRAMS = \ ndb_test_platform \ @@ -50,7 +50,7 @@ ndb_restore_SOURCES = restore/restore_main.cpp \ restore/Restore.cpp \ ../test/src/NDBT_ResultRow.cpp $(tools_common_sources) -ndb_config_SOURCES = ndb_condig.cpp \ +ndb_config_SOURCES = ndb_config.cpp \ ../src/mgmsrv/Config.cpp \ ../src/mgmsrv/ConfigInfo.cpp \ ../src/mgmsrv/InitConfigFileParser.cpp diff --git a/storage/ndb/tools/delete_all.cpp b/storage/ndb/tools/delete_all.cpp index e032709856e..1bf89f5a32f 100644 --- a/storage/ndb/tools/delete_all.cpp +++ b/storage/ndb/tools/delete_all.cpp @@ -36,24 +36,26 @@ static struct my_option my_long_options[] = { NDB_STD_OPTS("ndb_desc"), { "database", 
'd', "Name of database table is in", - (gptr*) &_dbname, (gptr*) &_dbname, 0, + (uchar**) &_dbname, (uchar**) &_dbname, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { "transactional", 't', "Single transaction (may run out of operations)", - (gptr*) &_transactional, (gptr*) &_transactional, 0, + (uchar**) &_transactional, (uchar**) &_transactional, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "tupscan", 999, "Run tupscan", - (gptr*) &_tupscan, (gptr*) &_tupscan, 0, + (uchar**) &_tupscan, (uchar**) &_tupscan, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "diskscan", 999, "Run diskcan", - (gptr*) &_diskscan, (gptr*) &_diskscan, 0, + (uchar**) &_diskscan, (uchar**) &_diskscan, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} }; static void usage() { +#ifdef NOT_USED char desc[] = "tabname\n"\ "This program will delete all records in the specified table using scan delete.\n"; +#endif ndb_std_print_version(); print_defaults(MYSQL_CONFIG_NAME,load_default_groups); puts(""); @@ -73,6 +75,7 @@ int main(int argc, char** argv){ return NDBT_ProgramExit(NDBT_WRONGARGS); Ndb_cluster_connection con(opt_connect_str); + con.set_name("ndb_delete_all"); if(con.connect(12, 5, 1) != 0) { ndbout << "Unable to connect to management server." << endl; diff --git a/storage/ndb/tools/desc.cpp b/storage/ndb/tools/desc.cpp index 02ab9d5f25f..831005139de 100644 --- a/storage/ndb/tools/desc.cpp +++ b/storage/ndb/tools/desc.cpp @@ -39,25 +39,27 @@ static struct my_option my_long_options[] = { NDB_STD_OPTS("ndb_desc"), { "database", 'd', "Name of database table is in", - (gptr*) &_dbname, (gptr*) &_dbname, 0, + (uchar**) &_dbname, (uchar**) &_dbname, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { "unqualified", 'u', "Use unqualified table names", - (gptr*) &_unqualified, (gptr*) &_unqualified, 0, + (uchar**) &_unqualified, (uchar**) &_unqualified, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "extra-partition-info", 'p', "Print more info per partition", - (gptr*) &_partinfo, (gptr*) &_partinfo, 0, + (uchar**) &_partinfo, (uchar**) &_partinfo, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "retries", 'r', "Retry every second for # retries", - (gptr*) &_retries, (gptr*) &_retries, 0, + (uchar**) &_retries, (uchar**) &_retries, 0, GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} }; static void usage() { +#ifdef NOT_USED char desc[] = "tabname\n"\ "This program list all properties of table(s) in NDB Cluster.\n"\ " ex: desc T1 T2 T4\n"; +#endif ndb_std_print_version(); print_defaults(MYSQL_CONFIG_NAME,load_default_groups); puts(""); @@ -79,6 +81,7 @@ int main(int argc, char** argv){ return NDBT_ProgramExit(NDBT_WRONGARGS); Ndb_cluster_connection con(opt_connect_str); + con.set_name("ndb_desc"); if(con.connect(12, 5, 1) != 0) { ndbout << "Unable to connect to management server." 
<< endl; @@ -96,7 +99,6 @@ int main(int argc, char** argv){ return NDBT_ProgramExit(NDBT_FAILED); } - NdbDictionary::Dictionary * dict= MyNdb.getDictionary(); for(int i= 0; i<argc;i++) { if(desc_table(&MyNdb,argv[i])) diff --git a/storage/ndb/tools/drop_index.cpp b/storage/ndb/tools/drop_index.cpp index 7cc791dcdb7..ec88f331a80 100644 --- a/storage/ndb/tools/drop_index.cpp +++ b/storage/ndb/tools/drop_index.cpp @@ -30,15 +30,17 @@ static struct my_option my_long_options[] = { NDB_STD_OPTS("ndb_desc"), { "database", 'd', "Name of database table is in", - (gptr*) &_dbname, (gptr*) &_dbname, 0, + (uchar**) &_dbname, (uchar**) &_dbname, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} }; static void usage() { +#ifdef NOT_USED char desc[] = "[<table> <index>]+\n"\ "This program will drop index(es) in Ndb\n"; +#endif ndb_std_print_version(); print_defaults(MYSQL_CONFIG_NAME,load_default_groups); puts(""); @@ -59,6 +61,7 @@ int main(int argc, char** argv){ } Ndb_cluster_connection con(opt_connect_str); + con.set_name("ndb_drop_index"); if(con.connect(12, 5, 1) != 0) { return NDBT_ProgramExit(NDBT_FAILED); diff --git a/storage/ndb/tools/drop_tab.cpp b/storage/ndb/tools/drop_tab.cpp index efbbba73d4b..8d07afbbf50 100644 --- a/storage/ndb/tools/drop_tab.cpp +++ b/storage/ndb/tools/drop_tab.cpp @@ -30,15 +30,17 @@ static struct my_option my_long_options[] = { NDB_STD_OPTS("ndb_desc"), { "database", 'd', "Name of database table is in", - (gptr*) &_dbname, (gptr*) &_dbname, 0, + (uchar**) &_dbname, (uchar**) &_dbname, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} }; static void usage() { +#ifdef NOT_USED char desc[] = "tabname\n"\ "This program will drop one table in Ndb\n"; +#endif ndb_std_print_version(); print_defaults(MYSQL_CONFIG_NAME,load_default_groups); puts(""); @@ -59,6 +61,7 @@ int main(int argc, char** argv){ } Ndb_cluster_connection con(opt_connect_str); + con.set_name("ndb_drop_table"); if(con.connect(12, 5, 1) != 0) { ndbout << "Unable to connect to management server." 
<< endl; diff --git a/storage/ndb/tools/listTables.cpp b/storage/ndb/tools/listTables.cpp index 6c78f93d285..45129cb34af 100644 --- a/storage/ndb/tools/listTables.cpp +++ b/storage/ndb/tools/listTables.cpp @@ -256,27 +256,28 @@ static struct my_option my_long_options[] = { NDB_STD_OPTS("ndb_show_tables"), { "database", 'd', "Name of database table is in", - (gptr*) &_dbname, (gptr*) &_dbname, 0, + (uchar**) &_dbname, (uchar**) &_dbname, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { "loops", 'l', "loops", - (gptr*) &_loops, (gptr*) &_loops, 0, + (uchar**) &_loops, (uchar**) &_loops, 0, GET_INT, REQUIRED_ARG, 1, 0, 0, 0, 0, 0 }, { "type", 't', "type", - (gptr*) &_type, (gptr*) &_type, 0, + (uchar**) &_type, (uchar**) &_type, 0, GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { "unqualified", 'u', "Use unqualified table names", - (gptr*) &_unqualified, (gptr*) &_unqualified, 0, + (uchar**) &_unqualified, (uchar**) &_unqualified, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "parsable", 'p', "Return output suitable for mysql LOAD DATA INFILE", - (gptr*) &_parsable, (gptr*) &_parsable, 0, + (uchar**) &_parsable, (uchar**) &_parsable, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "show-temp-status", OPT_SHOW_TMP_STATUS, "Show table temporary flag", - (gptr*) &show_temp_status, (gptr*) &show_temp_status, 0, + (uchar**) &show_temp_status, (uchar**) &show_temp_status, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} }; static void usage() { +#ifdef NOT_USED char desc[] = "tabname\n"\ "This program list all system objects in NDB Cluster.\n"\ @@ -284,6 +285,7 @@ static void usage() " ex: ndb_show_tables -t 2 would show all UserTables\n"\ "To show all indexes for a table write table name as final argument\n"\ " ex: ndb_show_tables T1\n"; +#endif ndb_std_print_version(); print_defaults(MYSQL_CONFIG_NAME,load_default_groups); puts(""); @@ -305,6 +307,7 @@ int main(int argc, char** argv){ _tabname = argv[0]; ndb_cluster_connection = new Ndb_cluster_connection(opt_connect_str); + ndb_cluster_connection->set_name("ndb_show_tables"); if (ndb_cluster_connection->connect(12,5,1)) fatal("Unable to connect to management server."); if (ndb_cluster_connection->wait_until_ready(30,0) < 0) diff --git a/storage/ndb/tools/ndb_condig.cpp b/storage/ndb/tools/ndb_config.cpp index 5c842076873..af36103f947 100644 --- a/storage/ndb/tools/ndb_condig.cpp +++ b/storage/ndb/tools/ndb_config.cpp @@ -58,37 +58,37 @@ static struct my_option my_long_options[] = { NDB_STD_OPTS("ndb_config"), { "nodes", 256, "Print nodes", - (gptr*) &g_nodes, (gptr*) &g_nodes, + (uchar**) &g_nodes, (uchar**) &g_nodes, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, { "connections", 256, "Print connections", - (gptr*) &g_connections, (gptr*) &g_connections, + (uchar**) &g_connections, (uchar**) &g_connections, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, { "query", 'q', "Query option(s)", - (gptr*) &g_query, (gptr*) &g_query, + (uchar**) &g_query, (uchar**) &g_query, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, { "host", 256, "Host", - (gptr*) &g_host, (gptr*) &g_host, + (uchar**) &g_host, (uchar**) &g_host, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, { "type", 258, "Type of node/connection", - (gptr*) &g_type, (gptr*) &g_type, + (uchar**) &g_type, (uchar**) &g_type, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, { "id", 258, "Nodeid", - (gptr*) &g_nodeid, (gptr*) &g_nodeid, + (uchar**) &g_nodeid, (uchar**) &g_nodeid, 0, GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, { "nodeid", 258, "Nodeid", - (gptr*) &g_nodeid, (gptr*) 
&g_nodeid, + (uchar**) &g_nodeid, (uchar**) &g_nodeid, 0, GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, { "fields", 'f', "Field separator", - (gptr*) &g_field_delimiter, (gptr*) &g_field_delimiter, + (uchar**) &g_field_delimiter, (uchar**) &g_field_delimiter, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, { "rows", 'r', "Row separator", - (gptr*) &g_row_delimiter, (gptr*) &g_row_delimiter, + (uchar**) &g_row_delimiter, (uchar**) &g_row_delimiter, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, { "config-file", 256, "Path to config.ini", - (gptr*) &g_config_file, (gptr*) &g_config_file, + (uchar**) &g_config_file, (uchar**) &g_config_file, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, { "mycnf", 256, "Read config from my.cnf", - (gptr*) &g_mycnf, (gptr*) &g_mycnf, + (uchar**) &g_mycnf, (uchar**) &g_mycnf, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} }; @@ -97,6 +97,7 @@ static void usage() { char desc[] = "This program will retreive config options for a ndb cluster\n"; + puts(desc); ndb_std_print_version(); print_defaults(MYSQL_CONFIG_NAME,load_default_groups); puts(""); @@ -111,12 +112,14 @@ struct Match { int m_key; BaseString m_value; + Match() {} virtual int eval(const Iter&); virtual ~Match() {} }; struct HostMatch : public Match { + HostMatch() {} virtual int eval(const Iter&); }; @@ -131,11 +134,13 @@ struct Apply struct NodeTypeApply : public Apply { + NodeTypeApply() {} virtual int apply(const Iter&); }; struct ConnectionTypeApply : public Apply { + ConnectionTypeApply() {} virtual int apply(const Iter&); }; @@ -294,10 +299,10 @@ parse_where(Vector<Match*>& where, int &argc, char**& argv) Match m; if(g_host) { - HostMatch *m = new HostMatch; - m->m_key = CFG_NODE_HOST; - m->m_value.assfmt("%s", g_host); - where.push_back(m); + HostMatch *tmp = new HostMatch; + tmp->m_key = CFG_NODE_HOST; + tmp->m_value.assfmt("%s", g_host); + where.push_back(tmp); } if(g_type) diff --git a/storage/ndb/tools/ndb_error_reporter b/storage/ndb/tools/ndb_error_reporter index 2b5aadb6171..7ad7a2f478a 100644 --- a/storage/ndb/tools/ndb_error_reporter +++ b/storage/ndb/tools/ndb_error_reporter @@ -62,13 +62,13 @@ foreach my $node (@nodes) (($config_get_fs)?" with filesystem":""). "\n\n"; my $recurse= ($config_get_fs)?'-r ':''; - system 'scp '.$recurse.$config_username.config($node,'host'). + system 'scp -p '.$recurse.$config_username.config($node,'host'). ':'.config($node,'datadir')."/ndb_".$node."* ". "$reportdir/\n"; } print "\n\n Copying configuration file...\n\n\t$config_file\n\n"; -system "cp $config_file $reportdir/"; +system "cp -p $config_file $reportdir/"; my $r = system 'bzip2 2>&1 > /dev/null < /dev/null'; my $outfile; diff --git a/storage/ndb/tools/ndb_size.pl b/storage/ndb/tools/ndb_size.pl index 3d1ea3f4231..3537a9e8490 100644 --- a/storage/ndb/tools/ndb_size.pl +++ b/storage/ndb/tools/ndb_size.pl @@ -1,17 +1,29 @@ #!/usr/bin/perl -w +# Copyright (C) 2005-2007 MySQL AB +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + use strict; use DBI; use POSIX; -use HTML::Template; +use Getopt::Long; # MySQL Cluster size estimator # ---------------------------- # -# (C)2005 MySQL AB -# -# # The purpose of this tool is to work out storage requirements # from an existing MySQL database. # @@ -25,157 +37,672 @@ use HTML::Template; # # BUGS # ---- -# - enum/set is 0 byte storage! Woah - efficient! # - DECIMAL is 0 byte storage. A bit too efficient. # - some float stores come out weird (when there's a comma e.g. 'float(4,1)') # - no disk data values # - computes the storage requirements of views (and probably MERGE) -# - ignores character sets. +# - ignores character sets? + +package MySQL::NDB::Size::Parameter; + +use Class::MethodMaker [ + scalar => 'name', + scalar => 'default', + scalar => 'mem_per_item', + scalar => [{ -default => '' },'unit'], + hash => [ qw ( value + ver_mem_per_item + mem_total ) ], + new => [ -hash => 'new' ], + ]; + +1; + +package MySQL::NDB::Size::Report; + +use Class::MethodMaker [ + scalar => [ qw( database + dsn ) ], + array => 'versions', + hash => [qw( tables + parameters + supporting_tables) ], + new => [ -hash => 'new' ], + ]; +1; + +package MySQL::NDB::Size::Column; + +use Class::MethodMaker [ + new => [ -hash => 'new' ], + scalar => [ qw( name + type + is_varsize + size + Key) ], + hash => 'dm_overhead', + scalar => [{ -default => 4 },'align'], + scalar => [{ -default => 0 },'null_bits'], + ]; + +# null_bits: +# 0 if not nullable, 1 if nullable +# + additional if bitfield as these are stored in the null bits +# if is_varsize, null_bits are in varsize part. + +# dm is default DataMemory value. Automatically 4byte aligned +# ver_dm is DataMemory value for specific versions. +# an entry in ver_dm OVERRIDES the dm value. +# e.g. if way column stored changed in new version. +# +# if is_varsize, dm/ver_dm is in varsized part. + +sub ver_dm_exists +{ + my ($self,$ver)= @_; + return exists($self->{ver_dm}{$ver}); +} + +use Data::Dumper; +sub ver_dm +{ + my ($self,$ver,$val)= @_; + if(@_ > 2) + { + $self->{ver_dm}{$ver}= + $self->align * POSIX::floor(($val+$self->align-1)/$self->align); + } + return $self->{ver_dm}{$ver}; +} -my $template = HTML::Template->new(filename => 'ndb_size.tmpl', - die_on_bad_params => 0) - or die "Could not open ndb_size.tmpl."; +sub dm +{ + my ($self,$val)= @_; + if(@_ > 1) + { + $self->{dm}= + $self->align * POSIX::floor(($val+$self->align-1)/$self->align) + } + return $self->{dm}; +} + +package MySQL::NDB::Size::Index; + +use Class::MethodMaker [ + new => [ -hash => 'new' ], + hash => [ qw( ver_dm + ver_im ) ], + scalar => [ qw( name + type + comment + columns + unique + dm + im) ], + scalar => [ { -default=> 4 },'align'], + scalar => [ { -default=> 0 },'is_supporting_table' ], + ]; + +package MySQL::NDB::Size::Table; + +# The following are computed by compute_row_size: +# row_dm_size DataMemory Size per row +# row_vdm_size Varsized DataMemory Size per row +# row_ddm_size Disk Data size per row (on disk size) +# +# These are hashes of versions. If an entry in (dm|vdm|ddm)_versions exists, +# then this parameter is calculated. +# +# Specific per-row overhead is in row_(dm|vdm|ddm)_overhead. +# e.g. if there is a varsized column, we have a vdm overhead for the +# varsized part of the row, otherwise vdm_size==0 + +# Any supporting tables - e.g. 
BLOBs have their name in supporting_tables +# These tables should then be looked up in the main report object. +# The main report object also has a supporting_tables hash used for +# excluding these from the main list of tables. +use POSIX; +use Class::MethodMaker [ + new => [ -hash => 'new' ], + array => [ qw( supporting_tables + dm_versions + vdm_versions + ddm_versions ) ], + scalar => [ qw( name + rows + schema + real_table_name) ], + hash => [ qw( columns + indexes + indexed_columns + row_im_size + row_dm_size + row_vdm_size + row_dm_overhead + row_vdm_overhead + row_ddm_overhead) ], + scalar => [ { -default=> 8192 },'im_pagesize'], + scalar => [ { -default=> 0 },'im_page_overhead'], + scalar => [ { -default=> 32768 },'dm_pagesize' ], + scalar => [ { -default=> 128 },'dm_page_overhead' ], + scalar => [ { -default=> 32768 },'vdm_pagesize' ], + scalar => [ { -default=> 128 },'vdm_page_overhead' ], + hash => [ # these are computed + qw( + dm_null_bytes + vdm_null_bytes + dm_needed + vdm_needed + im_needed + im_rows_per_page + dm_rows_per_page + vdm_rows_per_page) ], + scalar => [ { -default=> 4 },'align'], + ]; -my $dbh; +sub table_name +{ + my ($self) = @_; + if ($self->real_table_name) { + return $self->real_table_name; + }else { + return $self->name; + } +} -if(@ARGV < 3 || $ARGV[0] eq '--usage' || $ARGV[0] eq '--help') +sub compute_row_size +{ + my ($self, $releases) = @_; + + my %row_dm_size; + my %row_vdm_size; + my %row_im_size; + my %dm_null_bits; + my %vdm_null_bits; + my $no_varsize= 0; + + foreach my $c (keys %{$self->columns}) + { + if($self->columns->{$c}->is_varsize) + { + $no_varsize++; + foreach my $ver ($self->vdm_versions) + { + if($self->columns->{$c}->ver_dm_exists($ver)) + { + $row_vdm_size{$ver}+= $self->columns->{$c}->ver_dm($ver); + $vdm_null_bits{$ver}+= $self->columns->{$c}->null_bits(); + } + else + { + $row_vdm_size{$ver}+= $self->columns->{$c}->dm; + $vdm_null_bits{$ver}+= $self->columns->{$c}->null_bits(); + } + } + } + foreach my $ver ($self->dm_versions) + { + if($self->columns->{$c}->ver_dm_exists($ver)) + { + next if $self->columns->{$c}->is_varsize; + $row_dm_size{$ver}+= $self->columns->{$c}->ver_dm($ver); + $dm_null_bits{$ver}+= $self->columns->{$c}->null_bits(); + } + else + { + $row_dm_size{$ver}+= $self->columns->{$c}->dm||0; + $dm_null_bits{$ver}+= $self->columns->{$c}->null_bits()||0; + } + } + } + + foreach ($self->row_dm_overhead_keys()) + { + $row_dm_size{$_}+= $self->row_dm_overhead->{$_} + if exists($row_dm_size{$_}); + } + + + foreach ($self->row_vdm_overhead_keys()) + { + $row_vdm_size{$_}+= $self->row_vdm_overhead->{$_} + if exists($row_vdm_size{$_}); + } + + + # now we compute size of indexes for dm + foreach my $i (keys %{$self->indexes}) + { + foreach my $ver ($self->dm_versions) + { + $row_dm_size{$ver}+= $self->indexes->{$i}->dm() || 0; + } + } + + # now we compute size of indexes for im + while(my ($iname, $i) = $self->indexes_each()) + { + foreach my $ver ($self->dm_versions) + { + if($i->ver_im_exists($ver)) + { + $row_im_size{$ver}+= $i->ver_im->{$ver}; + } + else + { + $row_im_size{$ver}+= $i->im() || 0; + } + } + } + + # 32-bit align the null part + foreach my $k (keys %dm_null_bits) + { + $dm_null_bits{$k}= + $self->align * POSIX::floor((ceil($dm_null_bits{$k}/8)+$self->align-1) + /$self->align); + } + + foreach my $k (keys %vdm_null_bits) + { + $vdm_null_bits{$k}= + $self->align * POSIX::floor((ceil($vdm_null_bits{$k}/8)+$self->align-1) + /$self->align); + } + + # Finally set things + $self->dm_null_bytes(%dm_null_bits); + 
$self->vdm_null_bytes(%vdm_null_bits);
+
+ # add null bytes to dm/vdm size
+ foreach my $k (keys %row_dm_size)
+ {
+ $row_dm_size{$k}+=$dm_null_bits{$k}||0;
+ }
+
+ foreach my $k (keys %row_vdm_size)
+ {
+ $row_vdm_size{$k}+=$vdm_null_bits{$k}||0;
+ }
+
+ $self->row_dm_size(%row_dm_size);
+ $self->row_vdm_size(%row_vdm_size);
+ $self->row_im_size(%row_im_size);
+}
+
+sub compute_estimate
+{
+ my ($self) = @_;
+
+ foreach my $ver (@{$self->dm_versions})
+ {
+ $self->dm_rows_per_page_set($ver =>
+ floor(
+ ($self->dm_pagesize - $self->dm_page_overhead)
+ /
+ $self->row_dm_size->{$ver}
+ )
+ );
+ }
+
+ foreach my $ver (@{$self->vdm_versions})
+ {
+ next if ! $self->row_vdm_size_exists($ver);
+ $self->vdm_rows_per_page_set($ver =>
+ floor(
+ ($self->vdm_pagesize - $self->vdm_page_overhead)
+ /
+ $self->row_vdm_size->{$ver}
+ )
+ );
+ }
+
+ $self->im_page_overhead(0) if !$self->im_page_overhead();
+ foreach my $ver (@{$self->dm_versions})
+ {
+ $self->im_rows_per_page_set($ver =>
+ floor(
+ ($self->im_pagesize - $self->im_page_overhead)
+ /
+ $self->row_im_size->{$ver}
+ )
+ );
+ }
+
+ $self->dm_needed_set($_ => $self->dm_pagesize()
+ *
+ POSIX::ceil(
+ $self->rows
+ /
+ ($self->dm_rows_per_page->{$_})
+ )
+ )
+ foreach $self->dm_versions;
+
+ $self->vdm_needed_set($_ => (!$self->vdm_rows_per_page->{$_})? 0 :
+ $self->vdm_pagesize()
+ *
+ POSIX::ceil(
+ $self->rows
+ /
+ ($self->vdm_rows_per_page->{$_})
+ )
+ )
+ foreach $self->vdm_versions;
+
+ $self->im_needed_set($_ => $self->im_pagesize()
+ *
+ POSIX::ceil(
+ $self->rows
+ /
+ ($self->im_rows_per_page->{$_})
+ )
+ )
+ foreach $self->dm_versions;
+}
+
+package main;
+
+my ($dbh,
+ $database,
+ $socket,
+ $hostname,
+ $user,
+ $password);
+
+my ($help,
+ $savequeries,
+ $loadqueries,
+ $debug,
+ $format,
+ $excludetables,
+ $excludedbs);
+
+GetOptions('database|d=s'=>\$database,
+ 'hostname=s'=>\$hostname,
+ 'socket=s'=>\$socket,
+ 'user|u=s'=>\$user,
+ 'password|p=s'=>\$password,
+ 'savequeries|s=s'=>\$savequeries,
+ 'loadqueries|l=s'=>\$loadqueries,
+ 'excludetables=s'=>\$excludetables,
+ 'excludedbs=s'=>\$excludedbs,
+ 'help|usage|h!'=>\$help,
+ 'debug'=>\$debug,
+ 'format|f=s'=>\$format,
+ );
+
+my $report= new MySQL::NDB::Size::Report;
+
+if($help)
{
 print STDERR "Usage:\n";
- print STDERR "\tndb_size.pl database hostname user password\n\n";
- print STDERR "If you need to specify a port number, use host:port\n\n";
+ print STDERR "\tndb_size.pl --database=<db name>|ALL [--hostname=<host>] "
+ ."[--socket=<socket>] "
+ ."[--user=<user>] [--password=<password>] [--help|-h] [--format=(html|text)] [--loadqueries=<file>] [--savequeries=<file>]\n\n";
+ print STDERR "\t--database=<db name> ALL may be specified to examine all "
+ ."databases\n";
+ print STDERR "\t--hostname=<host>:<port> can be used to designate a "
+ ."specific port\n";
+ print STDERR "\t--hostname defaults to localhost\n";
+ print STDERR "\t--user and --password default to empty string\n";
+ print STDERR "\t--format=(html|text) Output format\n";
+ print STDERR "\t--excludetables Comma separated list of table names to skip\n";
+ print STDERR "\t--excludedbs Comma separated list of database names to skip\n";
+ print STDERR "\t--savequeries=<file> saves all queries to the DB into <file>\n";
+ print STDERR "\t--loadqueries=<file> loads query results from <file>. Doesn't connect to DB.\n";
 exit(1);
}
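To make the page arithmetic in compute_estimate() above concrete, a worked example with assumed numbers:

  # Assume a 36-byte DataMemory row, 1,000,000 rows, and the dm_pagesize
  # (32768) and dm_page_overhead (128) defaults declared earlier:
  #   rows per page = floor((32768 - 128) / 36) = 906
  #   pages needed  = ceil(1000000 / 906)       = 1104
  #   DataMemory    = 1104 * 32768 bytes, roughly 34.5 MB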
+
+$hostname= 'localhost' unless $hostname;
+
+my %queries; # used for loadqueries/savequeries
+
+if(!$loadqueries)
{
- my $database= $ARGV[0];
- my $hostname= $ARGV[1];
- my $user= $ARGV[2];
- my $password= $ARGV[3];
- my $dsn = "DBI:mysql:database=$database;host=$hostname";
+ my $dsn = "DBI:mysql:host=$hostname";
+ $dsn.= ";mysql_socket=$socket" if ($socket);
 $dbh= DBI->connect($dsn, $user, $password) or exit(1);
- $template->param(db => $database);
- $template->param(dsn => $dsn);
+ $report->dsn($dsn);
}
-my @releases = ({rel=>'4.1'},{rel=>'5.0'},{rel=>'5.1'}); #,{rel=>'5.1-dd'});
-$template->param(releases => \@releases);
+my @dbs;
+if ($database && !($database =~ /^ALL$/i))
+{
+ @dbs = split(',', $database);
+}
+else
+{
+ # Do all databases
+ @dbs = map { $_->[0] } @{ $dbh->selectall_arrayref("show databases") };
+}
+
+my %withdb = map {$_ => 1} @dbs;
+foreach (split ",", $excludedbs || '')
+{
+ delete $withdb{$_};
+}
+delete $withdb{'mysql'};
+delete $withdb{'INFORMATION_SCHEMA'};
+delete $withdb{'information_schema'};
+
+my $dblist = join (',', map { $dbh->quote($_) } keys %withdb );
-my $tables = $dbh->selectall_arrayref("show tables");
+$excludetables = join (',', map { $dbh->quote($_) } split ',', $excludetables )
+ if $excludetables;
-my @table_size;
+if(!$loadqueries)
+{
+ if (scalar(keys %withdb)>1)
+ {
+ $report->database("databases: $dblist");
+ }
+ else
+ {
+ $report->database("database: $dblist");
+ }
+}
+else
+{
+ open Q,"< $loadqueries";
+ my @q= <Q>;
+ my $VAR1;
+ my $e= eval join("",@q) or die $@;
+ %queries= %$e;
+ close Q;
+ $report->database("file:$loadqueries");
+}
-my @dbDataMemory;
-my @dbIndexMemory;
-my @NoOfAttributes;
-my @NoOfIndexes;
-my @NoOfTables;
-$NoOfTables[$_]{val} = @{$tables} foreach 0..$#releases;
+$report->versions('4.1','5.0','5.1');
+my $tables;
-sub align {
- my($to,@unaligned) = @_;
- my @aligned;
- foreach my $x (@unaligned) {
- push @aligned, $to * POSIX::floor(($x+$to-1)/$to);
+if($loadqueries)
+{
+ $tables= $queries{"show tables"};
+}
+else
+{
+ my $sql= "select t.TABLE_NAME,t.TABLE_SCHEMA " .
+ " from information_schema.TABLES t " .
+ " where t.TABLE_SCHEMA in ( $dblist ) ";
+
+ $sql.=" and t.TABLE_NAME not in " .
+ " ( $excludetables )"
+ if ($excludetables);
+
+ $tables= $dbh->selectall_arrayref($sql);
+
+ if (!$tables) {
+ print "WARNING: problem selecting from INFORMATION SCHEMA ($sql)\n";
+ if ($#dbs>0) {
+ print "\t attempting to fall back to show tables from $database";
+ $tables= $dbh->selectall_arrayref("show tables from $database\n");
+ } else {
+ print "All Databases not supported in 4.1. 
Please specify --database=\n"; + } } - return @aligned; + $queries{"show tables"}= $tables; } sub do_table { - my $table= shift; + my $t= shift; my $info= shift; my %indexes= %{$_[0]}; my @count= @{$_[1]}; - my @columns; - my %columnsize; # used for index calculations - # We now work out the DataMemory usage - - # sizes for 4.1, 5.0, 5.1 and 5.1-dd - my @totalsize= (0,0,0,0); - @totalsize= @totalsize[0..$#releases]; # limit to releases we're outputting - my $nrvarsize= 0; + $t->dm_versions($report->versions); + $t->vdm_versions('5.1'); + $t->ddm_versions('5.1'); - foreach(keys %$info) + foreach my $colname (keys %$info) { - my @realsize = (0,0,0,0); - my @varsize = (0,0,0,0); - my $type; - my $size; - my $name= $_; - my $is_varsize= 0; + my $col= new MySQL::NDB::Size::Column(name => $colname); + my ($type, $size); + + $col->Key($$info{$colname}{Key}) + if(defined($$info{$colname}{Key}) &&$$info{$colname}{Key} ne ''); + + $col->null_bits(defined($$info{$colname}{Null}) + && $$info{$colname}{Null} eq 'YES'); - if($$info{$_}{Type} =~ /^(.*?)\((\d+)\)/) + if(defined($$info{$colname}{Type}) + && $$info{$colname}{Type} =~ /^(.*?)\((.+)\)/) { $type= $1; $size= $2; } + elsif(exists($$info{$colname}{type})) + { + # we have a Column object.. + $type= $$info{$colname}{type}; + $size= $$info{$colname}{size}; + } else { - $type= $$info{$_}{Type}; + $type= $$info{$colname}{Type}; } + $col->type($type); + $col->size($size); if($type =~ /tinyint/) - {@realsize=(1,1,1,1)} + {$col->dm(1)} elsif($type =~ /smallint/) - {@realsize=(2,2,2,2)} + {$col->dm(2)} elsif($type =~ /mediumint/) - {@realsize=(3,3,3,3)} + {$col->dm(3)} elsif($type =~ /bigint/) - {@realsize=(8,8,8,8)} + {$col->dm(8)} elsif($type =~ /int/) - {@realsize=(4,4,4,4)} + {$col->dm(4)} elsif($type =~ /float/) { - if($size<=24) - {@realsize=(4,4,4,4)} + my @sz= split ',', $size; + $size= $sz[0]+$sz[1]; + if(!defined($size) || $size<=24) + {$col->dm(4)} else - {@realsize=(8,8,8,8)} + {$col->dm(8)} } elsif($type =~ /double/ || $type =~ /real/) - {@realsize=(8,8,8,8)} + {$col->dm(8)} elsif($type =~ /bit/) { - my $a=($size+7)/8; - @realsize = ($a,$a,$a,$a); + # bitfields stored in null bits + $col->null_bits($size+($col->null_bits()||0)); } elsif($type =~ /datetime/) - {@realsize=(8,8,8,8)} + {$col->dm(8)} elsif($type =~ /timestamp/) - {@realsize=(4,4,4,4)} + {$col->dm(4)} elsif($type =~ /date/ || $type =~ /time/) - {@realsize=(3,3,3,3)} + {$col->dm(3)} elsif($type =~ /year/) - {@realsize=(1,1,1,1)} + {$col->dm(1)} + elsif($type =~ /enum/ || $type =~ /set/) + { + # I *think* this is correct.. + my @items= split ',',$size; + $col->dm(ceil((scalar @items)/256)); + } elsif($type =~ /varchar/ || $type =~ /varbinary/) { my $fixed=$size+ceil($size/256); - my @dynamic=$dbh->selectrow_array("select avg(length(`" - .$name - ."`)) from `".$table.'`'); - $dynamic[0]=0 if !$dynamic[0]; - $dynamic[0]+=ceil($dynamic[0]/256); # size bit - $nrvarsize++; - $is_varsize= 1; - $varsize[3]= ceil($dynamic[0]); - @realsize= ($fixed,$fixed,ceil($dynamic[0]),$fixed); + $col->dm_overhead_set('length' => ceil($size/256)); + $col->dm($fixed); + if(!$col->Key()) # currently keys must be non varsized + { + my $sql= sprintf("select avg(length(`%s`)) " . + " from `%s`.`%s` " , + $colname, $t->schema(), $t->table_name()); + + my @dynamic; + if($loadqueries) + { + @dynamic= @{$queries{$sql}}; + } + else + { + @dynamic= $dbh->selectrow_array($sql); + $queries{$sql}= \@dynamic; + } + $dynamic[0]=0 if ! 
defined($dynamic[0]) || !@dynamic; + $dynamic[0]+=ceil($size/256); # size bit + $col->is_varsize(1); + $col->ver_dm('5.1',ceil($dynamic[0])); + } } elsif($type =~ /binary/ || $type =~ /char/) - {@realsize=($size,$size,$size,$size)} + {$col->dm($size)} elsif($type =~ /text/ || $type =~ /blob/) { - @realsize=(8+256,8+256,8+256,8+256); + $col->dm_overhead_set('length' => 8); + $col->dm(8+256); my $blobhunk= 2000; $blobhunk= 8000 if $type=~ /longblob/; $blobhunk= 4000 if $type=~ /mediumblob/; - my @blobsize=$dbh->selectrow_array("select SUM(CEILING(". - "length(`$name`)/$blobhunk))". - "from `".$table."`"); + my $sql= sprintf("select SUM(CEILING(length(`%s`)/%s)) " . + " from `%s`.`%s`" , + $colname, $blobhunk, + $t->schema(), $t->table_name() ); + + my @blobsize; + if($loadqueries) + { + @blobsize= @{$queries{$sql}}; + } + else + { + @blobsize= $dbh->selectrow_array($sql); + $queries{$sql}= \@blobsize; + } $blobsize[0]=0 if !defined($blobsize[0]); - #$NoOfTables[$_]{val} += 1 foreach 0..$#releases; # blob uses table - do_table($table."\$BLOB_$name", + + # Is a supporting table, add it to the lists: + $report->supporting_tables_set($t->schema().".".$t->name()."\$BLOB_$colname" => 1); + $t->supporting_tables_push($t->schema().".".$t->name()."\$BLOB_$colname"); + + my $st= new MySQL::NDB::Size::Table(name => + $t->name()."\$BLOB_$colname", + schema => $t->schema(), + rows => $blobsize[0], + row_dm_overhead => + { '4.1' => 12, + '5.0' => 12, + '5.1' => 16, + }, + row_vdm_overhead => + { '5.1' => 8 }, + row_ddm_overhead => + { '5.1' => 8 }, + ); + + + + do_table($st, {'PK'=>{Type=>'int'}, 'DIST'=>{Type=>'int'}, 'PART'=>{Type=>'int'}, @@ -195,26 +722,14 @@ sub do_table { \@blobsize); } - @realsize= @realsize[0..$#releases]; - @realsize= align(4,@realsize); - - $totalsize[$_]+=$realsize[$_] foreach 0..$#totalsize; - - my @realout; - push @realout,{val=>$_} foreach @realsize; - - push @columns, { - name=>$name, - type=>$type, - is_varsize=>$is_varsize, - size=>$size, - key=>$$info{$_}{Key}, - datamemory=>\@realout, - }; - - $columnsize{$name}= \@realsize; # used for index calculations + $col->type($type); + $col->size($size); + $t->columns_set( $colname => $col ); } - + #print "setting tables: ",$t->schema(), $t->table_name(), $t->name, $t->real_table_name || "" , "\n"; + # Use $t->name here instead of $t->table_name() to avoid namespace conflicts + $report->tables_set( $t->schema().".".$t->name() => $t ); + # And now... the IndexMemory usage. # # Firstly, we assemble some information about the indexes. @@ -222,170 +737,1057 @@ sub do_table { # we can still connect to pre-5.0 mysqlds. 
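# ---------------------------------------------------------------------
# A minimal sketch (not part of the patch; helper name and arguments
# are assumed) of the varsize estimate the hunk above implements: for a
# non-key varchar/varbinary column, 5.1 charges the sampled average
# stored length plus one length byte per started 256 bytes, rounded up.
use POSIX qw(ceil);

sub varsize_dm_per_row {
    my ($dbh, $schema, $table, $col, $declared_size) = @_;
    my ($avg) = $dbh->selectrow_array(
        sprintf("select avg(length(`%s`)) from `%s`.`%s`",
                $col, $schema, $table));
    $avg = 0 unless defined $avg;          # empty table => no data bytes
    $avg += ceil($declared_size / 256);    # length-prefix overhead
    return ceil($avg);                     # 5.1 varsized DataMemory/row
}
# ---------------------------------------------------------------------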
if(!defined($indexes{PRIMARY})) { - my @usage= ({val=>8},{val=>8},{val=>8},{val=>8}); - @usage= @usage[0..$#releases]; - $indexes{PRIMARY}= { - type=>'BTREE', - unique=>1, - comment=>'Hidden pkey created by NDB', - columns=>['HIDDEN_NDB_PKEY'], - }; - push @columns, { - name=>'HIDDEN_NDB_PKEY', - type=>'bigint', - size=>8, - key=>'PRI', - datamemory=>\@usage, - }; - $columnsize{'HIDDEN_NDB_PKEY'}= [8,8,8]; - } - - my @IndexDataMemory= ({val=>0},{val=>0},{val=>0},{val=>0}); - my @RowIndexMemory= ({val=>0},{val=>0},{val=>0},{val=>0}); - @IndexDataMemory= @IndexDataMemory[0..$#releases]; - @RowIndexMemory= @RowIndexMemory[0..$#releases]; + my $i= new MySQL::NDB::Size::Index( + name => 'PRIMARY', + unique => 1, + comment =>'Hidden pkey created by NDB', + type =>'BTREE', + columns => ['HIDDEN_NDB_PKEY'], + ); + + $i->im(16); + $i->dm(16); + $i->ver_im('4.1',25+8); + + $t->indexes_set('PRIMARY' => $i); + $t->indexed_columns_set('HIDDEN_NDB_PKEY' => 1); + + $t->columns_set('HIDDEN_NDB_PKEY' => + new MySQL::NDB::Size::Column( + name => 'HIDDEN_NDB_PKEY', + type => 'bigint', + dm => 8, + Key => 'PRI')); + } my @indexes; - foreach my $index (keys %indexes) { - my $im41= 25; - $im41+=$columnsize{$_}[0] foreach @{$indexes{$index}{columns}}; - my @im = ({val=>$im41},{val=>25},{val=>25}); #,{val=>25}); - my @dm = ({val=>10},{val=>10},{val=>10}); #,{val=>10}); - push @indexes, { - name=>$index, - type=>$indexes{$index}{type}, - columns=>join(',',@{$indexes{$index}{columns}}), - indexmemory=>\@im, - datamemory=>\@dm, - }; - $IndexDataMemory[$_]{val}+=$dm[$_]{val} foreach 0..$#releases; - $RowIndexMemory[$_]{val}+=$im[$_]{val} foreach 0..$#releases; - } - - # total size + 16 bytes overhead - my @TotalDataMemory; - my @RowOverhead = ({val=>16},{val=>16},{val=>16}); #,{val=>24}); - # 5.1 has ptr to varsize page, and per-varsize overhead - my @nrvarsize_mem= ({val=>0},{val=>0}, - {val=>8}); #,{val=>0}); + + # We model the PRIMARY first as needed for secondary uniq indexes + if(defined($indexes{'PRIMARY'})) { - my @a= align(4,$nrvarsize*2); - $nrvarsize_mem[2]{val}+=$a[0]+$nrvarsize*4; - } - - $TotalDataMemory[$_]{val}=$IndexDataMemory[$_]{val}+$totalsize[$_]+$RowOverhead[$_]{val}+$nrvarsize_mem[$_]{val} foreach 0..$#releases; - - my @RowDataMemory; - push @RowDataMemory,{val=>$_} foreach @totalsize; - - my @RowPerPage; - push @RowPerPage,{val=>(floor((32768-128)/$TotalDataMemory[$_]{val}))} foreach 0..$#TotalDataMemory; - - my @RowPerIndexPage; - push @RowPerIndexPage,{val=>(floor(8192/$RowIndexMemory[$_]{val}))} foreach 0..$#TotalDataMemory; - - my @DataMemory; - push @DataMemory,{val=>ceil(($count[0]/($RowPerPage[$_]{val})))*32} foreach 0..$#RowPerPage; - - my @IndexMemory; - push @IndexMemory,{val=>ceil(($count[0]/($RowPerIndexPage[$_]{val})))*8} foreach 0..$#RowPerPage; - - my $count= $count[0]; - my @counts; - $counts[$_]{val}= $count foreach 0..$#releases; - - my @nrvarsize_rel= ({val=>0},{val=>0}, - {val=>$nrvarsize}); #,{val=>0}); - - push @table_size, { - table=>$table, - indexes=>\@indexes, - columns=>\@columns, - count=>\@counts, - RowOverhead=>\@RowOverhead, - RowDataMemory=>\@RowDataMemory, - nrvarsize=>\@nrvarsize_rel, - nrvarsize_mem=>\@nrvarsize_mem, - releases=>\@releases, - IndexDataMemory=>\@IndexDataMemory, - TotalDataMemory=>\@TotalDataMemory, - RowPerPage=>\@RowPerPage, - DataMemory=>\@DataMemory, - RowIndexMemory=>\@RowIndexMemory, - RowPerIndexPage=>\@RowPerIndexPage, - IndexMemory=>\@IndexMemory, - - }; - - $dbDataMemory[$_]{val} += $DataMemory[$_]{val} foreach 0..$#releases; - 
$dbIndexMemory[$_]{val} += $IndexMemory[$_]{val} foreach 0..$#releases; - $NoOfAttributes[$_]{val} += @columns foreach 0..$#releases; - $NoOfIndexes[$_]{val} += @indexes foreach 0..$#releases; -} + my $index= 'PRIMARY'; + my $i= new MySQL::NDB::Size::Index( + name => $index, + unique => $indexes{$index}{unique}, + comment => $indexes{$index}{comment}, + type => $indexes{$index}{type}, + columns => [@{$indexes{$index}{columns}}], + ); + my $im41= 25; # keep old estimate for 4.1 + $im41+= $t->columns->{$_}->dm foreach @{$indexes{$index}{columns}}; + $i->im(16); # estimate from Johan + $i->dm(16) if $indexes{$index}{unique}; # estimate from Johan + $i->ver_im('4.1',$im41); + + $t->indexes_set($index => $i); + $t->indexed_columns_set($_ => 1) + foreach @{$indexes{$index}{columns}}; + } + + foreach my $index (keys %indexes) { + next if $index eq 'PRIMARY'; + + if(!$indexes{$index}{unique}) + { + my $i= new MySQL::NDB::Size::Index( + name => $index, + unique => $indexes{$index}{unique}, + comment => $indexes{$index}{comment}, + type => $indexes{$index}{type}, + columns => [@{$indexes{$index}{columns}}], + ); + $i->dm(16); + $t->indexes_set($index => $i); + $t->indexed_columns_set($_ => 1) + foreach @{$indexes{$index}{columns}}; + } + else + { + my $i= new MySQL::NDB::Size::Index( + name => $index, + unique => $indexes{$index}{unique}, + comment => $indexes{$index}{comment}, + type => $indexes{$index}{type}, + columns => [@{$indexes{$index}{columns}}, + @{$t->indexes->{'PRIMARY'}->columns()}], + ); + + $i->is_supporting_table(1); + $t->indexes_set($index => $i); + + my %idxcols; + foreach(@{$i->columns()}) + { + $idxcols{$_} = $t->columns->{$_} + } + # Is a supporting table, add it to the lists: + my $idxname= $t->name().'_'.join('_',@{$indexes{$index}{columns}}). 
+ "\$unique"; + $report->supporting_tables_set($t->schema().".".$idxname => 1); + $t->supporting_tables_push($t->schema().".".$idxname); + + $t->indexed_columns_set($_ => 1) + foreach @{$indexes{$index}{columns}}; + + my $st= new MySQL::NDB::Size::Table(name => $idxname, + real_table_name => $t->table_name(), + rows => $count[0], + schema => $t->schema(), + row_dm_overhead => + { '4.1' => 12, + '5.0' => 12, + '5.1' => 16+4, + }, + row_vdm_overhead => + { '5.1' => 8 }, + row_ddm_overhead => + { '5.1' => 8 }, + ); + do_table($st, + \%idxcols, + { + 'PRIMARY' => { + 'unique' => 0,#$indexes{$index}{unique}, + 'columns' => [@{$indexes{$index}{columns}}], + 'type' => 'BTREE', + } + }, + \@count); + } + } + + $t->compute_row_size($report->versions); + +} # do_table foreach(@{$tables}) { my $table= @{$_}[0]; - my $info= $dbh->selectall_hashref('describe `'.$table.'`',"Field"); - my @count = $dbh->selectrow_array('select count(*) from `'.$table.'`'); + my $schema = @{$_}[1] || $database; + my $info; + { + my $sql= 'describe `'.$schema.'`.`'.$table.'`'; + if($loadqueries) + { + $info= $queries{$sql}; + } + else + { + $info= $dbh->selectall_hashref($sql,"Field"); + $queries{$sql}= $info; + } + } + my @count; + { + my $sql= 'select count(*) from `'.$schema.'`.`'.$table.'`'; + if($loadqueries) + { + @count= @{$queries{$sql}}; + } + else + { + @count= $dbh->selectrow_array($sql); + $queries{$sql}= \@count; + } + } my %indexes; { - my $sth= $dbh->prepare("show index from `".$table.'`'); - $sth->execute; - while(my $i = $sth->fetchrow_hashref) - { + my @show_indexes; + { + my $sql= "show index from `".$schema.'`.`'.$table.'`'; + if($loadqueries) + { + @show_indexes= @{$queries{$sql}}; + } + else + { + my $sth= $dbh->prepare($sql); + $sth->execute; + while(my $i = $sth->fetchrow_hashref) + { + push @show_indexes, $i; + } + $queries{$sql}= \@show_indexes; + } + } + foreach my $i(@show_indexes) + { $indexes{${%$i}{Key_name}}= { type=>${%$i}{Index_type}, unique=>!${%$i}{Non_unique}, comment=>${%$i}{Comment}, } if !defined($indexes{${%$i}{Key_name}}); - $indexes{${%$i}{Key_name}}{columns}[${%$i}{Seq_in_index}-1]= + $indexes{${%$i}{Key_name}}{columns}[${%$i}{Seq_in_index}-1]= ${%$i}{Column_name}; } } - do_table($table, $info, \%indexes, \@count); + my $t= new MySQL::NDB::Size::Table(name => $table, + schema => $schema, + rows => $count[0], + row_dm_overhead => + { '4.1' => 12, + '5.0' => 12, + '5.1' => 16, + }, + row_vdm_overhead => { '5.1' => 8 }, + row_ddm_overhead => { '5.1' => 8 }, + ); + + + do_table($t, $info, \%indexes, \@count); +} + +# compute table estimates +while(my ($tname,$t)= $report->tables_each()) +{ + $t->compute_estimate(); +} + +# Now parameters.... 
+ +$report->parameters_set('NoOfTables' => + new MySQL::NDB::Size::Parameter(name=>'NoOfTables', + mem_per_item=>20, + default=>128) + ); + +$report->parameters->{'NoOfTables'}->value_set($_ => scalar @{$report->tables_keys()}) + foreach $report->versions; + +$report->parameters_set('NoOfAttributes' => + new MySQL::NDB::Size::Parameter(name=>'NoOfAttributes', + mem_per_item=>0.2, + default=>1000) + ); + +{ + my $attr= 0; + while(my ($tname,$t)= $report->tables_each()) + { + $attr+= scalar @{$t->columns_keys()}; + } + $report->parameters->{'NoOfAttributes'}->value_set($_ => $attr) + foreach $report->versions; +} + + +$report->parameters_set('NoOfOrderedIndexes' => + new MySQL::NDB::Size::Parameter(name=>'NoOfOrderedIndexes', + mem_per_item=>10, + default=>128) + ); +{ + my $attr= 0; + while(my ($tname,$t)= $report->tables_each()) + { + next if $report->supporting_tables_exists($tname); + $attr+= scalar @{$t->indexes_keys()}; + } + $report->parameters->{'NoOfOrderedIndexes'}->value_set($_ => $attr) + foreach $report->versions; +} + +$report->parameters_set('NoOfUniqueHashIndexes' => + new MySQL::NDB::Size::Parameter(name=>'NoOfUniqueHashIndexes', + mem_per_item=>15, + default=>64) + ); +{ + my $attr= 0; + while(my ($tname,$t)= $report->tables_each()) + { + next if not $tname =~ /\$unique$/; + $attr++; + } + $report->parameters->{'NoOfUniqueHashIndexes'}->value_set($_ => $attr) + foreach $report->versions; +} + +# Size of trigger is not documented +$report->parameters_set('NoOfTriggers' => + new MySQL::NDB::Size::Parameter(name=>'NoOfTriggers', + mem_per_item=>0, + default=>768) + ); + +{ + $report->parameters->{'NoOfTriggers'}->value_set( + $_ => ( + (3* + $report->parameters->{'NoOfUniqueHashIndexes'}->value->{$_}) + + + $report->parameters->{'NoOfOrderedIndexes'}->value->{$_} + + + (4* # for backups (3) and replication (1??) + $report->parameters->{'NoOfTables'}->value->{$_}) + + ) + ) + foreach $report->versions; +} + +# DataMemory is in bytes... 
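# ---------------------------------------------------------------------
# Sketch of the page math behind dm_needed/im_needed (helper assumed;
# the Size::Table objects carry the real per-version numbers, and the
# same rounding appeared in the old code as RowPerPage/DataMemory):
# rows are packed into fixed-size pages -- 32KB DataMemory pages with
# ~128 bytes of page overhead, 8KB IndexMemory pages -- and whole pages
# are allocated, so the totals summed below are page-rounded.
use POSIX qw(floor ceil);

sub pages_needed_bytes {
    my ($rows, $bytes_per_row, $pagesize, $page_overhead) = @_;
    my $rows_per_page = floor(($pagesize - $page_overhead) / $bytes_per_row);
    $rows_per_page = 1 if $rows_per_page < 1;   # degenerate: huge rows
    return ceil($rows / $rows_per_page) * $pagesize;
}
# e.g. 100000 rows at 64 bytes/row:
#   floor((32768 - 128) / 64) = 510 rows/page
#   ceil(100000 / 510)        = 197 pages => 197 * 32768 bytes, ~6.2MB
# ---------------------------------------------------------------------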
+$report->parameters_set('DataMemory' => + new MySQL::NDB::Size::Parameter(name=>'DataMemory', + mem_per_item=>1024, + unit=>'KB', + default=>80*1024) + ); +$report->parameters_set('IndexMemory' => + new MySQL::NDB::Size::Parameter(name=>'IndexMemory', + mem_per_item=>1024, + unit=>'KB', + default=>18*1024) + ); + +{ + foreach my $ver ($report->versions) + { + my $dm=0; + my $im=0; + while(my ($tname,$t)= $report->tables_each()) + { + $dm+=$t->dm_needed->{$ver}; + $dm+=$t->vdm_needed->{$ver} || 0; + $im+=$t->im_needed->{$ver}; + } + $report->parameters->{'DataMemory'}->value_set($ver => $dm/1024); + $report->parameters->{'IndexMemory'}->value_set($ver => $im/1024); + } } -my @NoOfTriggers; -# for unique hash indexes -$NoOfTriggers[$_]{val} += $NoOfIndexes[$_]{val}*3 foreach 0..$#releases; -# for ordered index -$NoOfTriggers[$_]{val} += $NoOfIndexes[$_]{val} foreach 0..$#releases; -my @ParamMemory; -foreach (0..$#releases) { - $ParamMemory[0]{releases}[$_]{val}= POSIX::ceil(200*$NoOfAttributes[$_]{val}/1024); - $ParamMemory[0]{name}= 'Attributes'; +if($savequeries) +{ + open Q, "> $savequeries"; + print Q Dumper(\%queries); + close Q; +} + +use Data::Dumper; - $ParamMemory[1]{releases}[$_]{val}= 20*$NoOfTables[$_]{val}; - $ParamMemory[1]{name}= 'Tables'; +if($debug) +{ + eval 'print STDERR Dumper($report)'; +} - $ParamMemory[2]{releases}[$_]{val}= 10*$NoOfIndexes[$_]{val}; - $ParamMemory[2]{name}= 'OrderedIndexes'; +$format= "text" unless $format; - $ParamMemory[3]{releases}[$_]{val}= 15*$NoOfIndexes[$_]{val}; - $ParamMemory[3]{name}= 'UniqueHashIndexes'; +if($format eq 'text') +{ + my $text_out= new MySQL::NDB::Size::Output::Text($report); + $text_out->output(); } +elsif($format eq 'html') +{ + my $html_out= new MySQL::NDB::Size::Output::HTML($report); + $html_out->output(); +} + +package MySQL::NDB::Size::Output::Text; +use Data::Dumper; -$template->param(tables => \@table_size); -$template->param(Parameters => [{name=>'DataMemory (kb)', - releases=>\@dbDataMemory}, - {name=>'IndexMemory (kb)', - releases=>\@dbIndexMemory}, - {name=>'MaxNoOfTables', - releases=>\@NoOfTables}, - {name=>'MaxNoOfAttributes', - releases=>\@NoOfAttributes}, - {name=>'MaxNoOfOrderedIndexes', - releases=>\@NoOfIndexes}, - {name=>'MaxNoOfUniqueHashIndexes', - releases=>\@NoOfIndexes}, - {name=>'MaxNoOfTriggers', - releases=>\@NoOfTriggers} - ] - ); -$template->param(ParamMemory => \@ParamMemory); +sub new { bless { report=> $_[1] }, $_[0]} + +sub ul +{ + my $s= $_[1]."\n"; + $s.='-' foreach (1..length($_[1])); + return $s.="\n"; +} -print $template->output; +sub output +{ + my $self= shift; + my $r= $self->{report}; + + print $self->ul("ndb_size.pl report for ". $r->database(). + " (".(($r->tables_count()||0)-($r->supporting_tables_count()||0)). + " tables)"); + + print "Connected to: ".$r->dsn()."\n\n"; + + print "Including information for versions: ". + join(', ',@{$r->versions})."\n\n"; + + foreach my $tname (@{$r->tables_keys()}) + { + my $t= $r->tables->{$tname}; +# next if $r->supporting_tables_exists($tname); + + print $self->ul($tname)."\n"; + + # format strings + my $f= "%25s "; + my $v= "%10s "; + + # Columns + print "DataMemory for Columns (* means varsized DataMemory):\n"; + printf $f.'%20s %9s %5s','Column Name','Type','Varsized', 'Key'; + printf $v, $_ foreach @{$r->versions}; + print "\n"; + my %dm_totals; + my %vdm_totals; + while(my ($cname, $c)= $t->columns_each()) + { + $c->type =~ /^([^\(]*)/g; + printf $f.'%20s %9s %5s', + $cname, + $1.( + ( $c->size and not $c->type() =~ /(enum|set)/) + ? 
'('.$c->size.')' + :'' ), + ($c->is_varsize)? 'Y':' ', + (defined($c->Key))?$c->Key:' '; + foreach(@{$r->versions}) + { + if($c->ver_dm_exists($_)) + { + printf $v, $c->ver_dm($_).(($c->is_varsize)?'*':''); + if($c->is_varsize()) + { + $vdm_totals{$_}+= $c->ver_dm($_); + } + else + { + $dm_totals{$_}+= $c->ver_dm($_); + } + } + else + { + printf $v, $c->dm||'N/A'; + $dm_totals{$_}+=$c->dm||0; + } + } + print "\n"; + } + printf $f.'%20s %9s %5s','','','', ''; + printf $v, '--' foreach @{$t->dm_versions}; + print "\n"; + printf $f.'%20s %9s %5s','Fixed Size Columns DM/Row','','',''; + printf $v, $dm_totals{$_} foreach @{$r->versions}; + print "\n"; + printf $f.'%20s %9s %5s','Varsize Columns DM/Row','','',''; + printf $v, $vdm_totals{$_} || 0 foreach @{$r->versions}; + print "\n"; + + + # DM for Indexes + print "\n\nDataMemory for Indexes:\n"; + printf $f.'%20s ','Index Name','Type'; + printf $v, $_ foreach @{$r->versions}; + print "\n"; + my %idx_dm_totals; + while(my ($iname, $i)= $t->indexes_each()) + { + printf $f.'%20s ',$iname,$i->type(); + foreach(@{$r->versions}) + { + if($i->ver_dm_exists($_)) + { + printf $v, $i->ver_dm($_).(($i->is_varsize)?'*':''); + $idx_dm_totals{$_}+= $i->ver_dm($_); + } + else + { + printf $v, ((defined($i->dm))?$i->dm:'N/A'); + $idx_dm_totals{$_}+= $i->dm if defined($i->dm); + } + } + print "\n"; + } + printf $f.'%20s ','',''; + printf $v, '--' foreach @{$r->versions}; + print "\n"; + printf $f.'%20s ','Total Index DM/Row',''; + printf $v, (defined($idx_dm_totals{$_}))?$idx_dm_totals{$_}:0 + foreach @{$r->versions}; + print "\n\n"; + + if(@{$t->supporting_tables()}) + { + print "\n\nSupporting Tables DataMemory/Row"; + my %supp_total; + foreach(@{$t->supporting_tables()}) + { + print "\n"; + printf $f, $_; + my $st= $r->tables->{$_}; + printf $v, $st->row_dm_size->{$_} foreach @{$st->dm_versions}; + $supp_total{$_}+=$st->row_dm_size->{$_} + foreach @{$st->dm_versions}; + } + print "\n"; + printf $f, ''; + printf $v, '--' foreach @{$t->dm_versions}; + print "\n"; + printf $f, 'This DataMemory/Row'; + printf $v, $t->row_dm_size->{$_} foreach @{$t->dm_versions}; + $supp_total{$_}+=$t->row_dm_size->{$_} + foreach @{$t->dm_versions}; + print "\n"; + printf $f, 'Total DM/Row'; + printf $v, $supp_total{$_} foreach @{$t->dm_versions}; + print " Includes DM in other tables\n"; + } + + # IM for Columns + print "\n\nIndexMemory for Indexes:\n"; + printf $f,'Index Name'; + printf $v, $_ foreach @{$r->versions}; + print "\n"; + my %im_totals; + foreach my $iname (@{$t->indexes_keys()}) + { + my $i= $t->indexes->{$iname}; + next if $i->is_supporting_table(); + + printf $f, $iname; + + foreach(@{$r->versions}) + { + if(!defined($i->im)) + { + printf $v,'N/A'; + next; + } + if($i->ver_im_exists($_)) + { + printf $v, $i->ver_im->{$_}; + $im_totals{$_}+= $i->ver_im->{$_}; + } + else + { + printf $v, $i->im; + $im_totals{$_}+=$i->im; + } + } + print "\n"; + } + printf $f,''; + printf $v, '--' foreach @{$t->dm_versions}; + print "\n"; + printf $f,'Indexes IM/Row'; + printf $v, $im_totals{$_} foreach @{$r->versions}; + print "\n"; + + if(@{$t->supporting_tables()}) + { + print "\n\nSupporting Tables IndexMemory/Row"; + my %supp_total; + foreach(@{$t->supporting_tables()}) + { + print "\n"; + my $st= $r->tables->{$_}; + foreach(@{$st->indexes_keys()}) + { + printf $f, $st->schema().".".$st->name() if $_ eq 'PRIMARY'; + printf $f, $st->schema().".".$st->name().$_ if $_ ne 'PRIMARY'; + my $sti= $st->indexes->{$_}; + printf $v, ($sti->ver_im_exists($_)) + ?$sti->ver_im->{$_} + 
:$sti->im() foreach @{$st->dm_versions}; + $supp_total{$_}+= ($sti->ver_im_exists($_)) + ?$sti->ver_im->{$_} + :$sti->im() foreach @{$st->dm_versions}; + + } + } + print "\n"; + printf $f, ''; + printf $v, '--' foreach @{$t->dm_versions}; + print "\n"; + print "\n"; + printf $f, 'Total Suppt IM/Row'; + printf $v, $supp_total{$_} foreach @{$t->dm_versions}; + print "\n"; + } + + print "\n\n\nSummary (for THIS table):\n"; + printf $f, ''; + printf $v, $_ foreach @{$r->versions}; + print "\n"; + printf $f, 'Fixed Overhead DM/Row'; + printf $v, $t->row_dm_overhead->{$_} foreach @{$t->dm_versions}; + print "\n"; + printf $f, 'NULL Bytes/Row'; + printf $v, $t->dm_null_bytes->{$_}||0 foreach @{$t->dm_versions}; + print "\n"; + printf $f, 'DataMemory/Row'; + printf $v, $t->row_dm_size->{$_} foreach @{$t->dm_versions}; + print " (Includes overhead, bitmap and indexes)\n"; + + print "\n"; + printf $f, 'Varsize Overhead DM/Row'; + printf $v, $t->row_vdm_overhead->{$_}||0 foreach @{$t->dm_versions}; + print "\n"; + printf $f, 'Varsize NULL Bytes/Row'; + printf $v, $t->vdm_null_bytes->{$_}||0 foreach @{$t->dm_versions}; + print "\n"; + printf $f, 'Avg Varside DM/Row'; + printf $v, (exists($t->row_vdm_size->{$_})? + $t->row_vdm_size->{$_}: 0) + foreach @{$r->versions}; + print "\n\n"; + printf $f, 'No. Rows'; + printf $v, $t->rows foreach @{$r->versions}; + print "\n\n"; + printf $f, 'Rows/'.($t->dm_pagesize()/1024).'kb DM Page'; + printf $v, $t->dm_rows_per_page->{$_} foreach @{$r->versions}; + print "\n"; + printf $f, 'Fixedsize DataMemory (KB)'; + printf $v, $t->dm_needed->{$_}/1024 foreach @{$r->versions}; + print "\n\n"; + printf $f, 'Rows/'.($t->vdm_pagesize()/1024).'kb Varsize DM Page'; + printf $v, $t->vdm_rows_per_page->{$_}||0 foreach @{$r->versions}; + print "\n"; + printf $f, 'Varsize DataMemory (KB)'; + printf $v, ($t->vdm_needed->{$_}||0)/1024 foreach @{$r->versions}; + print "\n\n"; + printf $f, 'Rows/'.($t->im_pagesize()/1024).'kb IM Page'; + printf $v, $t->im_rows_per_page->{$_} foreach @{$r->versions}; + print "\n"; + printf $f, 'IndexMemory (KB)'; + printf $v, $t->im_needed->{$_}/1024 foreach @{$r->versions}; + + print "\n\n\n"; + } + + print "\n\n\n"; + print $self->ul("Parameter Minimum Requirements"); + print "* indicates greater than default\n\n"; + printf "%25s ","Parameter"; + printf "%15s ",'Default' ; + printf "%15s%1s ",$_,'' foreach @{$r->versions}; + print "\n"; + while( my ($pname, $p)= $r->parameters_each()) + { + printf "%25s ",$pname.(($p->unit)?' 
('.$p->unit.')':''); + printf "%15u ", $p->default; + printf "%15u%1s ", $p->value->{$_}, + ($p->value->{$_} > $p->default)?'*':'' + foreach @{$r->versions}; + print "\n"; + } + print "\n\n\n"; +} + +sub table +{ + my $self= shift; + my $t= shift; +} + +package MySQL::NDB::Size::Output::HTML; + +sub new { bless { report=> $_[1] }, $_[0]} + +sub tag +{ + my ($self,$tag,$content)= @_; + return "<$tag>$content</$tag>\n"; +} + +sub h1 { my ($self,$t)= @_; return $self->tag('h1',$t); } +sub h2 { my ($self,$t)= @_; return $self->tag('h2',$t); } +sub h3 { my ($self,$t)= @_; return $self->tag('h3',$t); } +sub h4 { my ($self,$t)= @_; return $self->tag('h4',$t); } + +sub p { my ($self,$t)= @_; return $self->tag('p',$t); } +sub b { my ($self,$t)= @_; return $self->tag('b',$t); } + +sub th +{ + my ($self)= shift; + my $c; + $c.=$self->tag('th',$_) foreach @_; + return $self->tag('tr',$c); +} + +sub tr +{ + my ($self)= shift; + my $c; + $c.=$self->tag('td',$_) foreach @_; + return $self->tag('tr',$c); +} + +sub td { my ($self,$t)= @_; return $self->tag('td',$t); } + +sub ul +{ + my ($self)= shift; + my $c; + $c.= " ".$self->li($_) foreach @_; + return $self->tag('ul',$c); +} + +sub li { my ($self,$t)= @_; return $self->tag('li',$t); } + +sub href +{ + my ($self,$href,$t)= @_; + $href =~ s/\$/__/g; + return "<a href=\"$href\">$t</a>"; +} + +sub aname +{ + my ($self,$href,$t)= @_; + $href =~ s/\$/__/g; + return "<a id=\"$href\">$t</a>"; +} + +sub output +{ + my $self= shift; + my $r= $self->{report}; + + print <<ENDHTML; + <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd"> + <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en"> + <head> + <meta http-equiv="Content-Type" content="application/xhtml+xml; charset=utf-8"/> + <meta name="keywords" content="MySQL Cluster" /> +ENDHTML +print "<title>MySQL Cluster size estimate for ".$r->database()."</title>"; +print <<ENDHTML; + <style type="text/css"> + table { border-collapse: collapse } + td,th { border: 1px solid black } + </style> + </head> +<body> +ENDHTML + + print $self->h1("ndb_size.pl report for ". $r->database(). + " (".(($r->tables_count()||0)-($r->supporting_tables_count()||0)). + " tables)"); + + print $self->p("Connected to: ".$r->dsn()); + + print $self->p("Including information for versions: ". + join(', ',@{$r->versions})); + + if(@{$r->tables_keys()}) + { + print $self->h2("Table List"); + my @tlist; + foreach(sort @{$r->tables_keys()}) + { + push @tlist, $self->href("#$_",$_); + } + print $self->ul(@tlist); + } + + foreach my $tname (sort @{$r->tables_keys()}) + { + my $t= $r->tables->{$tname}; + + print $self->h2($self->aname($tname,$tname)); + + # format strings + my $f= "%25s "; + my $v= "%10s "; + + # Columns + print $self->h3("DataMemory for Columns"); + print $self->p("* means varsized DataMemory"); + print "<table>\n"; + print $self->th('Column Name','Type','Varsized', 'Key', + @{$r->versions}); + + my %dm_totals; + my %vdm_totals; + while(my ($cname, $c)= $t->columns_each()) + { + $c->type =~ /^([^\(]*)/g; + my @verinfo; + foreach(@{$r->versions}) + { + if($c->ver_dm_exists($_)) + { + push @verinfo, $c->ver_dm($_).(($c->is_varsize)?'*':''); + if($c->is_varsize()) + { + $vdm_totals{$_}+= $c->ver_dm($_); + } + else + { + $dm_totals{$_}+= $c->ver_dm($_); + } + } + else + { + push @verinfo, $c->dm||'N/A'; + $dm_totals{$_}+=$c->dm||0; + } + } + + print $self->tr( + $cname, + $1.( + ( $c->size and not $c->type() =~ /(enum|set)/) + ? '('.$c->size.')' + :'' ), + ($c->is_varsize)? 
'Y':' ', + (defined($c->Key))?$c->Key:' ',@verinfo); + } + + { + my @dmtot; + push @dmtot, $self->b($dm_totals{$_}) foreach @{$r->versions}; + print $self->tr($self->b('Fixed Size Columns DM/Row'),'','','', + @dmtot); + + } + { + my @vdmtot; + push @vdmtot, $self->b($vdm_totals{$_} || 0) + foreach @{$r->versions}; + print $self->tr($self->b('Varsize Columns DM/Row'),'','','', + @vdmtot); + } + + print "</table>\n"; + + # DM for Indexes + print $self->h3('DataMemory for Indexes'); + print "<table>\n"; + print $self->th('Index Name','Type',@{$r->versions}); + + my %idx_dm_totals; + while(my ($iname, $i)= $t->indexes_each()) + { + my @verinfo; + foreach(@{$r->versions}) + { + if($i->ver_dm_exists($_)) + { + push @verinfo, $i->ver_dm($_).(($i->is_varsize)?'*':''); + $idx_dm_totals{$_}+= $i->ver_dm($_); + } + else + { + push @verinfo, ((defined($i->dm))?$i->dm:'N/A'); + $idx_dm_totals{$_}+= $i->dm if defined($i->dm); + } + } + printf $self->tr($iname,$i->type(),@verinfo); + } + { + my @idxtot; + push @idxtot, $self->b((defined($idx_dm_totals{$_})) + ? $idx_dm_totals{$_}:0) + foreach @{$r->versions}; + print $self->tr($self->b('Total Index DM/Row'),'', + @idxtot); + } + + print "</table>"; + + if(@{$t->supporting_tables()}) + { + print $self->h3("Supporting Tables DataMemory/Row"); + my %supp_total; + + print "<table>"; + print $self->th('Table',@{$r->versions}); + foreach(@{$t->supporting_tables()}) + { + my $st= $r->tables->{$_}; + my @stdm; + push @stdm, $st->row_dm_size->{$_} foreach @{$st->dm_versions}; + + print $self->tr($_,@stdm); + + $supp_total{$_}+=$st->row_dm_size->{$_} + foreach @{$st->dm_versions}; + } + { + my @rdmtot; + push @rdmtot, $self->b($t->row_dm_size->{$_}) + foreach @{$t->dm_versions}; + print $self->tr($self->b('This DataMemory/Row'),@rdmtot); + } + $supp_total{$_}+=$t->row_dm_size->{$_} + foreach @{$t->dm_versions}; + + { + my @tdmr; + push @tdmr, $self->b($supp_total{$_}) + foreach @{$t->dm_versions}; + print $self->tr($self->b('Total DM/Row (inludes DM in other tables)'),@tdmr); + } + print "</table>"; + } + + # IM for Columns + print $self->h3("IndexMemory for Indexes"); + print "<table>\n"; + print $self->th('Index Name', @{$r->versions}); + + my %im_totals; + foreach my $iname (@{$t->indexes_keys()}) + { + my $i= $t->indexes->{$iname}; + next if $i->is_supporting_table(); + + my @verinfo; + foreach(@{$r->versions}) + { + if(!defined($i->im)) + { + push @verinfo,'N/A'; + next; + } + if($i->ver_im_exists($_)) + { + push @verinfo, $i->ver_im->{$_}; + $im_totals{$_}+= $i->ver_im->{$_}; + } + else + { + push @verinfo, $i->im; + $im_totals{$_}+=$i->im; + } + } + print $self->tr($iname, @verinfo); + } + { + my @v; + push @v, $self->b($im_totals{$_}) foreach @{$r->versions}; + printf $self->tr('Indexes IM/Row',@v); + } + print "</table>\n"; + + if(@{$t->supporting_tables()}) + { + print $self->h3("Supporting Tables IndexMemory/Row"); + print "<table>\n"; + my %supp_total; + foreach(@{$t->supporting_tables()}) + { + my $st= $r->tables->{$_}; + foreach(@{$st->indexes_keys()}) + { + my @r; + push @r, $st->schema().".".$st->name() if $_ eq 'PRIMARY'; + push @r, $st->schema().".".$st->name().$_ if $_ ne 'PRIMARY'; + my $sti= $st->indexes->{$_}; + push @r, ($sti->ver_im_exists($_)) + ?$sti->ver_im->{$_} + :$sti->im() foreach @{$st->dm_versions}; + $supp_total{$_}+= ($sti->ver_im_exists($_)) + ?$sti->ver_im->{$_} + :$sti->im() foreach @{$st->dm_versions}; + print $self->tr(@r); + } + } + { + my @r; + push @r, $self->b($supp_total{$_}) foreach @{$t->dm_versions}; + print 
$self->tr($self->b('Total Suppt IM/Row'),@r); + } + print "</table>\n"; + } + + print $self->h3("Summary (for THIS table)"); + print $self->h4("Fixed Sized Part"); + print "<table>\n"; + + print $self->tr('',@{$r->versions}); + + { my @r; + push @r, $t->row_dm_overhead->{$_} foreach @{$t->dm_versions}; + print $self->tr('Fixed Overhead DM/Row',@r); + } + { my @r; + push @r, $t->dm_null_bytes->{$_}||0 foreach @{$t->dm_versions}; + print $self->tr('NULL Bytes/Row',@r); + } + { my @r; + push @r, $t->row_dm_size->{$_} foreach @{$t->dm_versions}; + print $self->tr('DataMemory/Row (incl overhead, bitmap, indexes)', + @r); + } + print "</table>\n"; + print $self->h4("Variable Sized Part"); + print "<table>\n"; + + { my @r; + push @r, $t->row_vdm_overhead->{$_}||0 foreach @{$t->dm_versions}; + print $self->tr('Varsize Overhead DM/Row',@r); + } + { my @r; + push @r, $t->vdm_null_bytes->{$_}||0 foreach @{$t->dm_versions}; + print $self->tr('Varsize NULL Bytes/Row',@r); + } + { my @r; + push @r, (exists($t->row_vdm_size->{$_})? + $t->row_vdm_size->{$_}: 0) + foreach @{$r->versions}; + print $self->tr('Avg Varside DM/Row',@r); + } + print "</table>\n"; + print $self->h4("Memory Calculations"); + print "<table>\n"; + + { my @r; + push @r, $t->rows foreach @{$r->versions}; + print $self->tr('No. Rows',@r); + } + { my @r; + push @r, $t->dm_rows_per_page->{$_} foreach @{$r->versions}; + print $self->tr('Rows/'.($t->dm_pagesize()/1024).'kb DM Page',@r); + } + { my @r; + push @r, $t->dm_needed->{$_}/1024 foreach @{$r->versions}; + print $self->tr('Fixedsize DataMemory (KB)',@r); + } + { my @r; + push @r, $t->vdm_rows_per_page->{$_}||0 foreach @{$r->versions}; + print $self->tr('Rows/'.($t->vdm_pagesize()/1024). + 'kb Varsize DM Page', @r); + } + { my @r; + push @r, ($t->vdm_needed->{$_}||0)/1024 foreach @{$r->versions}; + print $self->tr('Varsize DataMemory (KB)', @r); + } + { my @r; + push @r, $t->im_rows_per_page->{$_} foreach @{$r->versions}; + print $self->tr('Rows/'.($t->im_pagesize()/1024).'kb IM Page', @r); + } + { my @r; + push @r, $t->im_needed->{$_}/1024 foreach @{$r->versions}; + print $self->tr('IndexMemory (KB)', @r); + } + + print "</table><hr/>\n\n"; + } + + print $self->h1("Parameter Minimum Requirements"); + print $self->p("* indicates greater than default"); + print "<table>\n"; + print $self->th("Parameter",'Default',@{$r->versions}); + while( my ($pname, $p)= $r->parameters_each()) + { + my @r; + push @r, $p->value->{$_}. + (($p->value->{$_} > $p->default)?'*':'') + foreach @{$r->versions}; + + print $self->tr($pname.(($p->unit)?' 
('.$p->unit.')':''), + $p->default, + @r); + } + print "</table></body></html>"; +} + +sub table +{ + my $self= shift; + my $t= shift; +} diff --git a/storage/ndb/tools/ndb_size.tmpl b/storage/ndb/tools/ndb_size.tmpl deleted file mode 100644 index 1e19ea132ba..00000000000 --- a/storage/ndb/tools/ndb_size.tmpl +++ /dev/null @@ -1,231 +0,0 @@ -<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd"> -<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en"> - <head> - <meta http-equiv="Content-Type" content="application/xhtml+xml; charset=utf-8"/> - <meta name="keywords" content="MySQL Cluster" /> - <title>MySQL Cluster size estimate for <TMPL_VAR NAME="db" ESCAPE="HTML"></title> -<style type="text/css"> -table { border-collapse: collapse } -td,th { border: 1px solid black } -</style> - </head> -<body> -<h1>MySQL Cluster analysis for <TMPL_VAR NAME="db" escape="html"></h1> -<p>This is an automated analysis of the <TMPL_VAR NAME="DSN" escape="html"> database for migration into <a href="http://www.mysql.com/">MySQL</a> Cluster. No warranty is made to the accuracy of the information.</p> - -<p>This information should be valid for MySQL 4.1 and 5.0. Since 5.1 is not a final release yet, the numbers should be used as a guide only.</p> - -<p>5.1-dd is for tables stored on disk. The ndb_size.pl estimates are <b>experimental</b> and should not be trusted. Notably we don't take into account indexed columns being in DataMemory versus non-indexed on disk.</p> - -<h2>Parameter Settings</h2> -<p><b>NOTE</b> the configuration parameters below do not take into account system tables and other requirements.</p> -<table> - <tr> - <th>Parameter</th> - <TMPL_LOOP NAME=releases> - <th><TMPL_VAR NAME=rel></th> - </TMPL_LOOP> - </tr> -<TMPL_LOOP NAME=Parameters> - <tr> - <td><TMPL_VAR NAME=name></td> - <TMPL_LOOP NAME=releases> - <td><TMPL_VAR NAME=val></td> - </TMPL_LOOP> - </tr> -</TMPL_LOOP> -</table> - -<h2>Memory usage because of parameters</h2> - -<p>Usage is in kilobytes. Actual usage will vary as you should set the parameters larger than those listed in the table above.</p> -<table> - <tr> - <th>Parameter</th> - <TMPL_LOOP NAME=releases> - <th><TMPL_VAR NAME=rel></th> - </TMPL_LOOP> - </tr> -<TMPL_LOOP NAME=ParamMemory> - <tr> - <td><TMPL_VAR NAME=name></td> - <TMPL_LOOP NAME=releases> - <td><TMPL_VAR NAME=val></td> - </TMPL_LOOP> - </tr> -</TMPL_LOOP> -</table> - -<h2>Table List</h2> -<ul> -<TMPL_LOOP NAME="tables"> -<li><a href="#<TMPL_VAR NAME="table">"><TMPL_VAR NAME="table"></a></li> -</TMPL_LOOP> -</ul> - -<hr/> - -<TMPL_LOOP NAME="tables"> -<h2><a name="<TMPL_VAR NAME="table">"><TMPL_VAR NAME="table"></a></h2> -<table> - <tr> - <th>Column</th> - <th>Type</th> - <th>VARSIZE</th> - <th>Size</th> - <th>Key</th> - <TMPL_LOOP NAME=releases> - <th><TMPL_VAR NAME=rel> NDB Size</th> - </TMPL_LOOP> - </tr> - <TMPL_LOOP NAME="columns"> - <tr> - <td><TMPL_VAR NAME=name></td> - <td><TMPL_VAR NAME=type></td> - <td><TMPL_IF NAME=is_varsize>YES<TMPL_ELSE> </TMPL_IF></td> - <td><TMPL_VAR NAME=size></td> - <td><TMPL_VAR NAME=key></td> - <TMPL_LOOP NAME=datamemory> - <td><TMPL_VAR NAME=val></td> - </TMPL_LOOP> - </tr> - </TMPL_LOOP> -</table> - -<p> </p> - -<h3>Indexes</h3> - -<p>We assume that indexes are ORDERED (not created USING HASH). 
If order is not required, 10 bytes of data memory can be saved per row if the index is created USING HASH</p> -<table> -<tr> - <th>Index</th> - <th>Type</th> - <th>Columns</th> - <TMPL_LOOP NAME=releases> - <th><TMPL_VAR NAME=rel> IdxMem</th> - </TMPL_LOOP> - <TMPL_LOOP NAME=releases> - <th><TMPL_VAR NAME=rel> DatMem</th> - </TMPL_LOOP> -</tr> -<TMPL_LOOP NAME="indexes"> - <tr> - <td><TMPL_VAR NAME=name></td> - <td><TMPL_VAR NAME=type></td> - <td><TMPL_VAR NAME=columns></td> - <TMPL_LOOP NAME=indexmemory> - <td><TMPL_VAR NAME=val></td> - </TMPL_LOOP> - <TMPL_LOOP NAME=datamemory> - <td><TMPL_VAR NAME=val></td> - </TMPL_LOOP> - </tr> -</TMPL_LOOP> -</table> - -<h3>DataMemory Usage</h3> -<table> -<tr> - <th> </th> - <TMPL_LOOP NAME=releases> - <th><TMPL_VAR NAME=rel></th> - </TMPL_LOOP> -</tr> -<tr> - <th>Nr Varsized Attributes</th> - <TMPL_LOOP NAME=nrvarsize> - <td><TMPL_VAR NAME=val></td> - </TMPL_LOOP> -</tr> -<tr> - <th>Row Overhead</th> - <TMPL_LOOP NAME=RowOverhead> - <td><TMPL_VAR NAME=val></td> - </TMPL_LOOP> -</tr> -<tr> - <th>Varsized Overhead</th> - <TMPL_LOOP NAME=nrvarsize_mem> - <td><TMPL_VAR NAME=val></td> - </TMPL_LOOP> -</tr> -<tr> - <th>Column DataMemory/Row</th> - <TMPL_LOOP NAME=RowDataMemory> - <td><TMPL_VAR NAME=val></td> - </TMPL_LOOP> -</tr> -<tr> - <th>Index DataMemory/Row</th> - <TMPL_LOOP NAME=IndexDataMemory> - <td><TMPL_VAR NAME=val></td> - </TMPL_LOOP> -</tr> -<tr> - <th>Total DataMemory/Row</th> - <TMPL_LOOP NAME=TotalDataMemory> - <td><TMPL_VAR NAME=val></td> - </TMPL_LOOP> -</tr> -<tr> - <th>Rows per 32kb page</th> - <TMPL_LOOP NAME=RowPerPage> - <td><TMPL_VAR NAME=val></td> - </TMPL_LOOP> -</tr> -<tr> - <th>Current number of rows</th> - <TMPL_LOOP NAME=count> - <td><TMPL_VAR NAME=val></td> - </TMPL_LOOP> -</tr> -<tr> - <th>Total DataMemory (kb)</th> - <TMPL_LOOP NAME=DataMemory> - <td><TMPL_VAR NAME=val></td> - </TMPL_LOOP> -</tr> -</table> - -<h3>IndexMemory Usage</h3> -<table> -<tr> - <th> </th> - <TMPL_LOOP NAME=releases> - <th><TMPL_VAR NAME=rel></th> - </TMPL_LOOP> -</tr> -<tr> - <th>IndexMemory/Row</th> - <TMPL_LOOP NAME=RowIndexMemory> - <td><TMPL_VAR NAME=val></td> - </TMPL_LOOP> -</tr> -<tr> - <th>Rows per 8kb page</th> - <TMPL_LOOP NAME=RowPerIndexPage> - <td><TMPL_VAR NAME=val></td> - </TMPL_LOOP> -</tr> -<tr> - <th>Current number of rows</th> - <TMPL_LOOP NAME=count> - <td><TMPL_VAR NAME=val></td> - </TMPL_LOOP> -</tr> -<tr> - <th>Total IndexMemory (kb)</th> - <TMPL_LOOP NAME=IndexMemory> - <td><TMPL_VAR NAME=val></td> - </TMPL_LOOP> -</tr> -</table> - -<hr/> -</TMPL_LOOP> - -<p>This is the output of ndb_size.pl.</p> -</body> -</html> - diff --git a/storage/ndb/tools/restore/Restore.cpp b/storage/ndb/tools/restore/Restore.cpp index 15e442a4f35..a7d8a9d10d9 100644 --- a/storage/ndb/tools/restore/Restore.cpp +++ b/storage/ndb/tools/restore/Restore.cpp @@ -27,6 +27,7 @@ #include <NdbAutoPtr.hpp> #include "../../../../sql/ha_ndbcluster_tables.h" +extern NdbRecordPrintFormat g_ndbrecord_print_format; Uint16 Twiddle16(Uint16 in); // Byte shift 16-bit data Uint32 Twiddle32(Uint32 in); // Byte shift 32-bit data @@ -298,6 +299,7 @@ RestoreMetaData::markSysTables() Uint32 i; for (i = 0; i < getNoOfTables(); i++) { TableS* table = allTables[i]; + table->m_local_id = i; const char* tableName = table->getTableName(); if ( // XXX should use type strcmp(tableName, "SYSTAB_0") == 0 || @@ -310,9 +312,11 @@ RestoreMetaData::markSysTables() "cluster_replication" -> "cluster" -> "mysql" */ strcmp(tableName, "cluster_replication/def/" OLD_NDB_APPLY_TABLE) == 0 || - 
strcmp(tableName, "cluster/def/" OLD_NDB_APPLY_TABLE) == 0 || + strcmp(tableName, OLD_NDB_REP_DB "/def/" OLD_NDB_APPLY_TABLE) == 0 || + strcmp(tableName, OLD_NDB_REP_DB "/def/" OLD_NDB_SCHEMA_TABLE) == 0 || strcmp(tableName, NDB_REP_DB "/def/" NDB_APPLY_TABLE) == 0 || strcmp(tableName, NDB_REP_DB "/def/" NDB_SCHEMA_TABLE)== 0 ) + table->isSysTable = true; } for (i = 0; i < getNoOfTables(); i++) { @@ -330,6 +334,7 @@ RestoreMetaData::markSysTables() if (table->getTableId() == (Uint32) id1) { if (table->isSysTable) blobTable->isSysTable = true; + blobTable->m_main_table = table; break; } } @@ -427,6 +432,7 @@ TableS::TableS(Uint32 version, NdbTableImpl* tableImpl) m_noOfRecords= 0; backupVersion = version; isSysTable = false; + m_main_table = NULL; for (int i = 0; i < tableImpl->getNoOfColumns(); i++) createAttr(tableImpl->getColumn(i)); @@ -601,7 +607,10 @@ RestoreDataIterator::getNextTuple(int & res) attr_data->size = 4*sz; //if (m_currentTable->getTableId() >= 2) { ndbout << "fix i=" << i << " off=" << ptr-buf_ptr << " attrId=" << attrId << endl; } - + if(!m_hostByteOrder + && attr_desc->m_column->getType() == NdbDictionary::Column::Timestamp) + attr_data->u_int32_value[0] = Twiddle32(attr_data->u_int32_value[0]); + if(!Twiddle(attr_desc, attr_data)) { res = -1; @@ -658,6 +667,31 @@ RestoreDataIterator::getNextTuple(int & res) */ const Uint32 arraySize = sz / (attr_desc->size / 8); assert(arraySize <= attr_desc->arraySize); + + //convert the length of blob(v1) and text(v1) + if(!m_hostByteOrder + && (attr_desc->m_column->getType() == NdbDictionary::Column::Blob + || attr_desc->m_column->getType() == NdbDictionary::Column::Text) + && attr_desc->m_column->getArrayType() == NdbDictionary::Column::ArrayTypeFixed) + { + char* p = (char*)&attr_data->u_int64_value[0]; + Uint64 x; + memcpy(&x, p, sizeof(Uint64)); + x = Twiddle64(x); + memcpy(p, &x, sizeof(Uint64)); + } + + //convert datetime type + if(!m_hostByteOrder + && attr_desc->m_column->getType() == NdbDictionary::Column::Datetime) + { + char* p = (char*)&attr_data->u_int64_value[0]; + Uint64 x; + memcpy(&x, p, sizeof(Uint64)); + x = Twiddle64(x); + memcpy(p, &x, sizeof(Uint64)); + } + if(!Twiddle(attr_desc, attr_data, attr_desc->arraySize)) { res = -1; @@ -915,6 +949,7 @@ bool RestoreDataIterator::readFragmentHeader(int & ret, Uint32 *fragmentId) return false; } + info.setLevel(254); info << "_____________________________________________________" << endl << "Processing data in table: " << m_currentTable->getTableName() << "(" << Header.TableId << ") fragment " @@ -1172,14 +1207,14 @@ operator<<(NdbOut& ndbout, const AttributeS& attr){ if (data.null) { - ndbout << "<NULL>"; + ndbout << g_ndbrecord_print_format.null_string; return ndbout; } NdbRecAttr tmprec(0); - tmprec.setup(desc.m_column, (char *)data.void_value); + tmprec.setup(desc.m_column, 0); tmprec.receive_data((Uint32*)data.void_value, data.size); - ndbout << tmprec; + ndbrecattr_print_formatted(ndbout, tmprec, g_ndbrecord_print_format); return ndbout; } @@ -1188,17 +1223,15 @@ operator<<(NdbOut& ndbout, const AttributeS& attr){ NdbOut& operator<<(NdbOut& ndbout, const TupleS& tuple) { - ndbout << tuple.getTable()->getTableName() << "; "; for (int i = 0; i < tuple.getNoOfAttributes(); i++) { + if (i > 0) + ndbout << g_ndbrecord_print_format.fields_terminated_by; AttributeData * attr_data = tuple.getData(i); const AttributeDesc * attr_desc = tuple.getDesc(i); const AttributeS attr = {attr_desc, *attr_data}; debug << i << " " << attr_desc->m_column->getName(); ndbout << attr; - - if 
(i != (tuple.getNoOfAttributes() - 1)) - ndbout << delimiter << " "; } // for return ndbout; } diff --git a/storage/ndb/tools/restore/Restore.hpp b/storage/ndb/tools/restore/Restore.hpp index 0517b8006ee..5455fa17aa0 100644 --- a/storage/ndb/tools/restore/Restore.hpp +++ b/storage/ndb/tools/restore/Restore.hpp @@ -25,8 +25,6 @@ #include <ndb_version.h> #include <version.h> -static const char * delimiter = ";"; // Delimiter in file dump - const int FileNameLenC = 256; const int TableNameLenC = 256; const int AttrNameLenC = 256; @@ -142,6 +140,8 @@ class TableS { Uint64 m_max_auto_val; bool isSysTable; + TableS *m_main_table; + Uint32 m_local_id; Uint64 m_noOfRecords; Vector<FragmentInfo *> m_fragmentInfo; @@ -156,6 +156,9 @@ public: Uint32 getTableId() const { return m_dictTable->getTableId(); } + Uint32 getLocalId() const { + return m_local_id; + } Uint32 getNoOfRecords() const { return m_noOfRecords; } @@ -214,6 +217,9 @@ public: memcpy(&val.u32,data,4); v= val.u32; break; + case 24: + v= uint3korr((unsigned char*)data); + break; case 16: memcpy(&val.u16,data,2); v= val.u16; @@ -239,6 +245,10 @@ public: return isSysTable; } + const TableS *getMainTable() const { + return m_main_table; + } + TableS& operator=(TableS& org) ; }; // TableS; @@ -377,6 +387,7 @@ public: m_values_e.push_back(m_values[i]); m_values.clear(); } + LogEntry() {} ~LogEntry() { Uint32 i; diff --git a/storage/ndb/tools/restore/consumer.hpp b/storage/ndb/tools/restore/consumer.hpp index 36986ea30ba..4121cc4e1b4 100644 --- a/storage/ndb/tools/restore/consumer.hpp +++ b/storage/ndb/tools/restore/consumer.hpp @@ -24,6 +24,7 @@ extern const char *Ndb_apply_table; class BackupConsumer { public: + BackupConsumer() {} virtual ~BackupConsumer() { } virtual bool init() { return true;} virtual bool object(Uint32 tableType, const void*) { return true;} @@ -40,6 +41,7 @@ public: NODE_GROUP_MAP *m_nodegroup_map; uint m_nodegroup_map_len; virtual bool has_temp_error() {return false;} + virtual bool table_equal(const TableS &) {return true;} }; #endif diff --git a/storage/ndb/tools/restore/consumer_printer.cpp b/storage/ndb/tools/restore/consumer_printer.cpp index dabd77f7358..721e0332381 100644 --- a/storage/ndb/tools/restore/consumer_printer.cpp +++ b/storage/ndb/tools/restore/consumer_printer.cpp @@ -14,6 +14,9 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include "consumer_printer.hpp" +extern FilteredNdbOut info; +extern NdbRecordPrintFormat g_ndbrecord_print_format; +extern const char *tab_path; bool BackupPrinter::table(const TableS & tab) @@ -21,7 +24,8 @@ BackupPrinter::table(const TableS & tab) if (m_print || m_print_meta) { m_ndbout << tab; - ndbout_c("Successfully printed table: %s", tab.m_dictTable->getName()); + info.setLevel(254); + info << "Successfully printed table: ", tab.m_dictTable->getName(); } return true; } @@ -31,7 +35,14 @@ BackupPrinter::tuple(const TupleS & tup, Uint32 fragId) { m_dataCount++; if (m_print || m_print_data) - m_ndbout << tup << endl; + { + if (m_ndbout.m_out == info.m_out) + { + info.setLevel(254); + info << tup.getTable()->getTableName() << "; "; + } + m_ndbout << tup << g_ndbrecord_print_format.lines_terminated_by; + } } void @@ -47,9 +58,10 @@ BackupPrinter::endOfLogEntrys() { if (m_print || m_print_log) { - ndbout << "Printed " << m_dataCount << " tuples and " - << m_logCount << " log entries" - << " to stdout." << endl; + info.setLevel(254); + info << "Printed " << m_dataCount << " tuples and " + << m_logCount << " log entries" + << " to stdout." 
<< endl; } } bool diff --git a/storage/ndb/tools/restore/consumer_restore.cpp b/storage/ndb/tools/restore/consumer_restore.cpp index 5b194fb7033..89f680a80e4 100644 --- a/storage/ndb/tools/restore/consumer_restore.cpp +++ b/storage/ndb/tools/restore/consumer_restore.cpp @@ -182,6 +182,7 @@ BackupRestore::finalize_table(const TableS & table){ } +#ifdef NOT_USED static bool default_nodegroups(NdbDictionary::Table *table) { Uint16 *node_groups = (Uint16*)table->getFragmentData(); @@ -197,6 +198,7 @@ static bool default_nodegroups(NdbDictionary::Table *table) } return true; } +#endif static Uint32 get_no_fragments(Uint64 max_rows, Uint32 no_nodes) @@ -422,13 +424,14 @@ error: bool BackupRestore::translate_frm(NdbDictionary::Table *table) { - const void *pack_data, *data, *new_pack_data; + uchar *pack_data, *data, *new_pack_data; char *new_data; - uint data_len, pack_len, new_data_len, new_pack_len; + uint new_data_len; + size_t data_len, new_pack_len; uint no_parts, extra_growth; DBUG_ENTER("translate_frm"); - pack_data = table->getFrmData(); + pack_data = (uchar*) table->getFrmData(); no_parts = table->getFragmentCount(); /* Add max 4 characters per partition to handle worst case @@ -440,7 +443,7 @@ bool BackupRestore::translate_frm(NdbDictionary::Table *table) { DBUG_RETURN(TRUE); } - if ((new_data = my_malloc(data_len + extra_growth, MYF(0)))) + if ((new_data = (char*) my_malloc(data_len + extra_growth, MYF(0)))) { DBUG_RETURN(TRUE); } @@ -449,7 +452,7 @@ bool BackupRestore::translate_frm(NdbDictionary::Table *table) my_free(new_data, MYF(0)); DBUG_RETURN(TRUE); } - if (packfrm((const void*)new_data, new_data_len, + if (packfrm((uchar*) new_data, new_data_len, &new_pack_data, &new_pack_len)) { my_free(new_data, MYF(0)); @@ -617,6 +620,7 @@ BackupRestore::update_apply_status(const RestoreMetaData &metaData) return true; bool result= false; + unsigned apply_table_format= 0; m_ndb->setDatabaseName(NDB_REP_DB); m_ndb->setSchemaName("def"); @@ -629,8 +633,33 @@ BackupRestore::update_apply_status(const RestoreMetaData &metaData) << dict->getNdbError() << endl; return false; } + if + (ndbtab->getColumn(0)->getType() == NdbDictionary::Column::Unsigned && + ndbtab->getColumn(1)->getType() == NdbDictionary::Column::Bigunsigned) + { + if (ndbtab->getNoOfColumns() == 2) + { + apply_table_format= 1; + } + else if + (ndbtab->getColumn(2)->getType() == NdbDictionary::Column::Varchar && + ndbtab->getColumn(3)->getType() == NdbDictionary::Column::Bigunsigned && + ndbtab->getColumn(4)->getType() == NdbDictionary::Column::Bigunsigned) + { + apply_table_format= 2; + } + } + if (apply_table_format == 0) + { + err << Ndb_apply_table << " has wrong format\n"; + return false; + } + Uint32 server_id= 0; Uint64 epoch= metaData.getStopGCP(); + Uint64 zero= 0; + char empty_string[1]; + empty_string[0]= 0; NdbTransaction * trans= m_ndb->startTransaction(); if (!trans) { @@ -653,6 +682,15 @@ BackupRestore::update_apply_status(const RestoreMetaData &metaData) << op->getNdbError() << endl; goto err; } + if ((apply_table_format == 2) && + (op->setValue(2u, (const char *)&empty_string, 1) || + op->setValue(3u, (const char *)&zero, sizeof(zero)) || + op->setValue(4u, (const char *)&zero, sizeof(zero)))) + { + err << Ndb_apply_table << ": " + << op->getNdbError() << endl; + goto err; + } if (trans->execute(NdbTransaction::Commit)) { err << Ndb_apply_table << ": " @@ -666,6 +704,66 @@ err: } bool +BackupRestore::table_equal(const TableS &tableS) +{ + if (!m_restore) + return true; + + const char *tablename = 
tableS.getTableName(); + + if(tableS.m_dictTable == NULL){ + ndbout<<"Table %s has no m_dictTable " << tablename << endl; + return false; + } + /** + * Ignore blob tables + */ + if(match_blob(tablename) >= 0) + return true; + + const NdbTableImpl & tmptab = NdbTableImpl::getImpl(* tableS.m_dictTable); + if ((int) tmptab.m_indexType != (int) NdbDictionary::Index::Undefined){ + return true; + } + + BaseString tmp(tablename); + Vector<BaseString> split; + if(tmp.split(split, "/") != 3){ + err << "Invalid table name format " << tablename << endl; + return false; + } + + m_ndb->setDatabaseName(split[0].c_str()); + m_ndb->setSchemaName(split[1].c_str()); + + NdbDictionary::Dictionary* dict = m_ndb->getDictionary(); + const NdbDictionary::Table* tab = dict->getTable(split[2].c_str()); + if(tab == 0){ + err << "Unable to find table: " << split[2].c_str() << endl; + return false; + } + + if(tab->getNoOfColumns() != tableS.m_dictTable->getNoOfColumns()) + { + ndbout_c("m_columns.size %d != %d",tab->getNoOfColumns(), + tableS.m_dictTable->getNoOfColumns()); + return false; + } + + for(int i = 0; i<tab->getNoOfColumns(); i++) + { + if(!tab->getColumn(i)->equal(*(tableS.m_dictTable->getColumn(i)))) + { + ndbout_c("m_columns %s != %s",tab->getColumn(i)->getName(), + tableS.m_dictTable->getColumn(i)->getName()); + return false; + } + } + + return true; +} + +bool BackupRestore::createSystable(const TableS & tables){ if (!m_restore && !m_restore_meta && !m_restore_epoch) return true; @@ -716,7 +814,7 @@ BackupRestore::table(const TableS & table){ BaseString tmp(name); Vector<BaseString> split; if(tmp.split(split, "/") != 3){ - err << "Invalid table name format " << name << endl; + err << "Invalid table name format `" << name << "`" << endl; return false; } @@ -772,6 +870,22 @@ BackupRestore::table(const TableS & table){ copy.setFragmentData((const void *)ng_array, no_parts << 1); } + /** + * Force of varpart was introduced in 5.1.18, telco 6.1.7 and 6.2.1 + * Since default from mysqld is to add force of varpart (disable with + * ROW_FORMAT=FIXED) we force varpart onto tables when they are restored + * from backups taken with older versions. This will be wrong if + * ROW_FORMAT=FIXED was used on original table, however the likelyhood of + * this is low, since ROW_FORMAT= was a NOOP in older versions. 
+ */ + + if (table.getBackupVersion() < MAKE_VERSION(5,1,18)) + copy.setForceVarPart(true); + else if (getMajor(table.getBackupVersion()) == 6 && + (table.getBackupVersion() < MAKE_VERSION(6,1,7) || + table.getBackupVersion() == MAKE_VERSION(6,2,0))) + copy.setForceVarPart(true); + /* update min and max rows to reflect the table, this to ensure that memory is allocated properly in the ndb kernel @@ -781,10 +895,25 @@ BackupRestore::table(const TableS & table){ { copy.setMaxRows(table.getNoOfRecords()); } + + NdbTableImpl &tableImpl = NdbTableImpl::getImpl(copy); + if (table.getBackupVersion() < MAKE_VERSION(5,1,0) && !m_no_upgrade){ + for(int i= 0; i < copy.getNoOfColumns(); i++) + { + NdbDictionary::Column::Type t = copy.getColumn(i)->getType(); + + if (t == NdbDictionary::Column::Varchar || + t == NdbDictionary::Column::Varbinary) + tableImpl.getColumn(i)->setArrayType(NdbDictionary::Column::ArrayTypeShortVar); + if (t == NdbDictionary::Column::Longvarchar || + t == NdbDictionary::Column::Longvarbinary) + tableImpl.getColumn(i)->setArrayType(NdbDictionary::Column::ArrayTypeMediumVar); + } + } if (dict->createTable(copy) == -1) { - err << "Create table " << table.getTableName() << " failed: " + err << "Create table `" << table.getTableName() << "` failed: " << dict->getNdbError() << endl; if (dict->getNdbError().code == 771) { @@ -801,12 +930,13 @@ BackupRestore::table(const TableS & table){ } return false; } - info << "Successfully restored table " << table.getTableName()<< endl ; + info << "Successfully restored table `" + << table.getTableName() << "`" << endl; } const NdbDictionary::Table* tab = dict->getTable(split[2].c_str()); if(tab == 0){ - err << "Unable to find table: " << split[2].c_str() << endl; + err << "Unable to find table: `" << split[2].c_str() << "`" << endl; return false; } if(m_restore_meta) @@ -868,12 +998,15 @@ BackupRestore::endOfTables(){ for(size_t i = 0; i<m_indexes.size(); i++){ NdbTableImpl & indtab = NdbTableImpl::getImpl(* m_indexes[i]); - BaseString tmp(indtab.m_primaryTable.c_str()); Vector<BaseString> split; - if(tmp.split(split, "/") != 3){ - err << "Invalid table name format " << indtab.m_primaryTable.c_str() - << endl; - return false; + { + BaseString tmp(indtab.m_primaryTable.c_str()); + if (tmp.split(split, "/") != 3) + { + err << "Invalid table name format `" << indtab.m_primaryTable.c_str() + << "`" << endl; + return false; + } } m_ndb->setDatabaseName(split[0].c_str()); @@ -881,39 +1014,41 @@ BackupRestore::endOfTables(){ const NdbDictionary::Table * prim = dict->getTable(split[2].c_str()); if(prim == 0){ - err << "Unable to find base table \"" << split[2].c_str() - << "\" for index " - << indtab.getName() << endl; + err << "Unable to find base table `" << split[2].c_str() + << "` for index `" + << indtab.getName() << "`" << endl; return false; } NdbTableImpl& base = NdbTableImpl::getImpl(*prim); NdbIndexImpl* idx; - int id; - char idxName[255], buf[255]; - if(sscanf(indtab.getName(), "%[^/]/%[^/]/%d/%s", - buf, buf, &id, idxName) != 4){ - err << "Invalid index name format " << indtab.getName() << endl; - return false; + Vector<BaseString> split_idx; + { + BaseString tmp(indtab.getName()); + if (tmp.split(split_idx, "/") != 4) + { + err << "Invalid index name format `" << indtab.getName() << "`" << endl; + return false; + } } if(NdbDictInterface::create_index_obj_from_table(&idx, &indtab, &base)) { - err << "Failed to create index " << idxName - << " on " << split[2].c_str() << endl; + err << "Failed to create index `" << split_idx[3] + << "` on " 
<< split[2].c_str() << endl; return false; } - idx->setName(idxName); + idx->setName(split_idx[3].c_str()); if(dict->createIndex(* idx) != 0) { delete idx; - err << "Failed to create index " << idxName - << " on " << split[2].c_str() << endl + err << "Failed to create index `" << split_idx[3].c_str() + << "` on `" << split[2].c_str() << "`" << endl << dict->getNdbError() << endl; return false; } delete idx; - info << "Successfully created index " << idxName - << " on " << split[2].c_str() << endl; + info << "Successfully created index `" << split_idx[3].c_str() + << "` on `" << split[2].c_str() << "`" << endl; } return true; } @@ -1022,10 +1157,27 @@ void BackupRestore::tuple_a(restore_callback_t *cb) int size = attr_desc->size; int arraySize = attr_desc->arraySize; char * dataPtr = attr_data->string_value; - Uint32 length = attr_data->size; - + Uint32 length = 0; + + if (!attr_data->null) + { + const unsigned char * src = (const unsigned char *)dataPtr; + switch(attr_desc->m_column->getType()){ + case NdbDictionary::Column::Varchar: + case NdbDictionary::Column::Varbinary: + length = src[0] + 1; + break; + case NdbDictionary::Column::Longvarchar: + case NdbDictionary::Column::Longvarbinary: + length = src[0] + (src[1] << 8) + 2; + break; + default: + length = attr_data->size; + break; + } + } if (j == 0 && tup.getTable()->have_auto_inc(i)) - tup.getTable()->update_max_auto_val(dataPtr,size); + tup.getTable()->update_max_auto_val(dataPtr,size*arraySize); if (attr_desc->m_column->getPrimaryKey()) { @@ -1043,7 +1195,7 @@ void BackupRestore::tuple_a(restore_callback_t *cb) if (ret < 0) { ndbout_c("Column: %d type %d %d %d %d",i, attr_desc->m_column->getType(), - size, arraySize, attr_data->size); + size, arraySize, length); break; } } @@ -1183,6 +1335,7 @@ BackupRestore::endOfTuples() tuple_free(); } +#ifdef NOT_USED static bool use_part_id(const NdbDictionary::Table *table) { if (table->getDefaultNoPartitionsFlag() && @@ -1191,6 +1344,7 @@ static bool use_part_id(const NdbDictionary::Table *table) else return true; } +#endif static Uint32 get_part_id(const NdbDictionary::Table *table, Uint32 hash_value) @@ -1279,7 +1433,7 @@ BackupRestore::logEntry(const LogEntry & tup) const char * dataPtr = attr->Data.string_value; if (tup.m_table->have_auto_inc(attr->Desc->attrId)) - tup.m_table->update_max_auto_val(dataPtr,size); + tup.m_table->update_max_auto_val(dataPtr,size*arraySize); const Uint32 length = (size / 8) * arraySize; if (attr->Desc->m_column->getPrimaryKey()) diff --git a/storage/ndb/tools/restore/consumer_restore.hpp b/storage/ndb/tools/restore/consumer_restore.hpp index 5063619d3c3..8694cbffb0c 100644 --- a/storage/ndb/tools/restore/consumer_restore.hpp +++ b/storage/ndb/tools/restore/consumer_restore.hpp @@ -51,6 +51,7 @@ public: m_callback = 0; m_free_callback = 0; m_temp_error = false; + m_no_upgrade = false; m_transactions = 0; m_cache.m_old_table = 0; } @@ -73,6 +74,7 @@ public: virtual bool finalize_table(const TableS &); virtual bool has_temp_error(); virtual bool createSystable(const TableS & table); + virtual bool table_equal(const TableS & table); virtual bool update_apply_status(const RestoreMetaData &metaData); void connectToMysql(); bool map_in_frm(char *new_data, const char *data, @@ -90,6 +92,7 @@ public: bool m_restore_meta; bool m_no_restore_disk; bool m_restore_epoch; + bool m_no_upgrade; // for upgrade ArrayType from 5.0 backup file. 
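  /*
    Note on the flag above (inferred from this patch, not an original
    source comment): backups written before 5.1 store Varchar/Varbinary
    and Longvarchar/Longvarbinary with the old fixed array layout, so
    BackupRestore::table() converts them to ArrayTypeShortVar (1-byte
    length) and ArrayTypeMediumVar (2-byte length) during restore;
    running ndb_restore with --no-upgrade (-u) sets m_no_upgrade and
    skips that conversion.
  */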
Uint32 m_logCount; Uint32 m_dataCount; diff --git a/storage/ndb/tools/restore/restore_main.cpp b/storage/ndb/tools/restore/restore_main.cpp index 1f91c03d2cf..7db77524ad8 100644 --- a/storage/ndb/tools/restore/restore_main.cpp +++ b/storage/ndb/tools/restore/restore_main.cpp @@ -20,6 +20,7 @@ #include <NdbTCP.h> #include <NdbMem.h> #include <NdbOut.hpp> +#include <OutputStream.hpp> #include <NDBT_ReturnCodes.h> #include "consumer_restore.hpp" @@ -33,15 +34,26 @@ static int ga_nodeId = 0; static int ga_nParallelism = 128; static int ga_backupId = 0; static bool ga_dont_ignore_systab_0 = false; +static bool ga_no_upgrade = false; static Vector<class BackupConsumer *> g_consumers; +static BackupPrinter* g_printer = NULL; -static const char* ga_backupPath = "." DIR_SEPARATOR; +static const char* default_backupPath = "." DIR_SEPARATOR; +static const char* ga_backupPath = default_backupPath; static const char *opt_nodegroup_map_str= 0; static unsigned opt_nodegroup_map_len= 0; static NODE_GROUP_MAP opt_nodegroup_map[MAX_NODE_GROUP_MAPS]; #define OPT_NDB_NODEGROUP_MAP 'z' +const char *opt_ndb_database= NULL; +const char *opt_ndb_table= NULL; +unsigned int opt_verbose; +unsigned int opt_hex_format; +Vector<BaseString> g_databases; +Vector<BaseString> g_tables; +NdbRecordPrintFormat g_ndbrecord_print_format; + NDB_STD_OPTS_VARS; /** @@ -50,6 +62,7 @@ NDB_STD_OPTS_VARS; static bool ga_restore_epoch = false; static bool ga_restore = false; static bool ga_print = false; +static bool ga_skip_table_check = false; static int _print = 0; static int _print_meta = 0; static int _print_data = 0; @@ -61,66 +74,126 @@ BaseString g_options("ndb_restore"); const char *load_default_groups[]= { "mysql_cluster","ndb_restore",0 }; +enum ndb_restore_options { + OPT_PRINT= NDB_STD_OPTIONS_LAST, + OPT_PRINT_DATA, + OPT_PRINT_LOG, + OPT_PRINT_META, + OPT_BACKUP_PATH, + OPT_HEX_FORMAT, + OPT_FIELDS_ENCLOSED_BY, + OPT_FIELDS_TERMINATED_BY, + OPT_FIELDS_OPTIONALLY_ENCLOSED_BY, + OPT_LINES_TERMINATED_BY, + OPT_APPEND, + OPT_VERBOSE +}; +static const char *opt_fields_enclosed_by= NULL; +static const char *opt_fields_terminated_by= NULL; +static const char *opt_fields_optionally_enclosed_by= NULL; +static const char *opt_lines_terminated_by= NULL; + +static const char *tab_path= NULL; +static int opt_append; + static struct my_option my_long_options[] = { NDB_STD_OPTS("ndb_restore"), { "connect", 'c', "same as --connect-string", - (gptr*) &opt_connect_str, (gptr*) &opt_connect_str, 0, + (uchar**) &opt_connect_str, (uchar**) &opt_connect_str, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { "nodeid", 'n', "Backup files from node with id", - (gptr*) &ga_nodeId, (gptr*) &ga_nodeId, 0, + (uchar**) &ga_nodeId, (uchar**) &ga_nodeId, 0, GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { "backupid", 'b', "Backup id", - (gptr*) &ga_backupId, (gptr*) &ga_backupId, 0, + (uchar**) &ga_backupId, (uchar**) &ga_backupId, 0, GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { "restore_data", 'r', "Restore table data/logs into NDB Cluster using NDBAPI", - (gptr*) &_restore_data, (gptr*) &_restore_data, 0, + (uchar**) &_restore_data, (uchar**) &_restore_data, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "restore_meta", 'm', "Restore meta data into NDB Cluster using NDBAPI", - (gptr*) &_restore_meta, (gptr*) &_restore_meta, 0, + (uchar**) &_restore_meta, (uchar**) &_restore_meta, 0, + GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, + { "no-upgrade", 'u', + "Don't upgrade array type for var attributes, which don't resize VAR data and don't change column attributes", + 
(uchar**) &ga_no_upgrade, (uchar**) &ga_no_upgrade, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "no-restore-disk-objects", 'd', "Dont restore disk objects (tablespace/logfilegroups etc)", - (gptr*) &_no_restore_disk, (gptr*) &_no_restore_disk, 0, + (uchar**) &_no_restore_disk, (uchar**) &_no_restore_disk, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "restore_epoch", 'e', "Restore epoch info into the status table. Convenient on a MySQL Cluster " "replication slave, for starting replication. The row in " NDB_REP_DB "." NDB_APPLY_TABLE " with id 0 will be updated/inserted.", - (gptr*) &ga_restore_epoch, (gptr*) &ga_restore_epoch, 0, + (uchar**) &ga_restore_epoch, (uchar**) &ga_restore_epoch, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, + { "skip-table-check", 's', "Skip table structure check during restore of data", + (uchar**) &ga_skip_table_check, (uchar**) &ga_skip_table_check, 0, + GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "parallelism", 'p', "No of parallel transactions during restore of data." "(parallelism can be 1 to 1024)", - (gptr*) &ga_nParallelism, (gptr*) &ga_nParallelism, 0, + (uchar**) &ga_nParallelism, (uchar**) &ga_nParallelism, 0, GET_INT, REQUIRED_ARG, 128, 1, 1024, 0, 1, 0 }, - { "print", 256, "Print data and log to stdout", - (gptr*) &_print, (gptr*) &_print, 0, + { "print", OPT_PRINT, "Print metadata, data and log to stdout", + (uchar**) &_print, (uchar**) &_print, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, - { "print_data", 257, "Print data to stdout", - (gptr*) &_print_data, (gptr*) &_print_data, 0, + { "print_data", OPT_PRINT_DATA, "Print data to stdout", + (uchar**) &_print_data, (uchar**) &_print_data, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, - { "print_meta", 258, "Print meta data to stdout", - (gptr*) &_print_meta, (gptr*) &_print_meta, 0, + { "print_meta", OPT_PRINT_META, "Print meta data to stdout", + (uchar**) &_print_meta, (uchar**) &_print_meta, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, - { "print_log", 259, "Print log to stdout", - (gptr*) &_print_log, (gptr*) &_print_log, 0, + { "print_log", OPT_PRINT_LOG, "Print log to stdout", + (uchar**) &_print_log, (uchar**) &_print_log, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, - { "backup_path", 260, "Path to backup files", - (gptr*) &ga_backupPath, (gptr*) &ga_backupPath, 0, + { "backup_path", OPT_BACKUP_PATH, "Path to backup files", + (uchar**) &ga_backupPath, (uchar**) &ga_backupPath, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { "dont_ignore_systab_0", 'f', "Experimental. Do not ignore system table during restore.", - (gptr*) &ga_dont_ignore_systab_0, (gptr*) &ga_dont_ignore_systab_0, 0, + (uchar**) &ga_dont_ignore_systab_0, (uchar**) &ga_dont_ignore_systab_0, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "ndb-nodegroup-map", OPT_NDB_NODEGROUP_MAP, "Nodegroup map for ndbcluster. 
Syntax: list of (source_ng, dest_ng)", - (gptr*) &opt_nodegroup_map_str, - (gptr*) &opt_nodegroup_map_str, + (uchar**) &opt_nodegroup_map_str, + (uchar**) &opt_nodegroup_map_str, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, + { "fields-enclosed-by", OPT_FIELDS_ENCLOSED_BY, + "Fields are enclosed by ...", + (uchar**) &opt_fields_enclosed_by, (uchar**) &opt_fields_enclosed_by, 0, + GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, + { "fields-terminated-by", OPT_FIELDS_TERMINATED_BY, + "Fields are terminated by ...", + (uchar**) &opt_fields_terminated_by, + (uchar**) &opt_fields_terminated_by, 0, + GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, + { "fields-optionally-enclosed-by", OPT_FIELDS_OPTIONALLY_ENCLOSED_BY, + "Fields are optionally enclosed by ...", + (uchar**) &opt_fields_optionally_enclosed_by, + (uchar**) &opt_fields_optionally_enclosed_by, 0, + GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, + { "hex", OPT_HEX_FORMAT, "print binary types in hex format", + (uchar**) &opt_hex_format, (uchar**) &opt_hex_format, 0, + GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, + { "tab", 'T', "Creates tab separated textfile for each table to " + "given path. (creates .txt files)", + (uchar**) &tab_path, (uchar**) &tab_path, 0, + GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + { "append", OPT_APPEND, "for --tab append data to file", + (uchar**) &opt_append, (uchar**) &opt_append, 0, + GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, + { "lines-terminated-by", OPT_LINES_TERMINATED_BY, "", + (uchar**) &opt_lines_terminated_by, (uchar**) &opt_lines_terminated_by, 0, + GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, + { "verbose", OPT_VERBOSE, + "verbosity", + (uchar**) &opt_verbose, (uchar**) &opt_verbose, 0, + GET_INT, REQUIRED_ARG, 1, 0, 255, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} }; @@ -255,20 +328,25 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), #endif ndb_std_get_one_option(optid, opt, argument); switch (optid) { + case OPT_VERBOSE: + info.setThreshold(255-opt_verbose); + break; case 'n': if (ga_nodeId == 0) { - printf("Error in --nodeid,-n setting, see --help\n"); + err << "Error in --nodeid,-n setting, see --help"; exit(NDBT_ProgramExit(NDBT_WRONGARGS)); } + info.setLevel(254); info << "Nodeid = " << ga_nodeId << endl; break; case 'b': if (ga_backupId == 0) { - printf("Error in --backupid,-b setting, see --help\n"); + err << "Error in --backupid,-b setting, see --help"; exit(NDBT_ProgramExit(NDBT_WRONGARGS)); } + info.setLevel(254); info << "Backup Id = " << ga_backupId << endl; break; case OPT_NDB_NODEGROUP_MAP: @@ -277,6 +355,8 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), to nodegroup in new cluster. 
*/ opt_nodegroup_map_len= 0; + + info.setLevel(254); info << "Analyse node group map" << endl; if (analyse_nodegroup_map(opt_nodegroup_map_str, &opt_nodegroup_map[0])) @@ -331,9 +411,9 @@ o verify nodegroup mapping exit(NDBT_ProgramExit(NDBT_WRONGARGS)); #endif - BackupPrinter* printer = new BackupPrinter(opt_nodegroup_map, - opt_nodegroup_map_len); - if (printer == NULL) + g_printer = new BackupPrinter(opt_nodegroup_map, + opt_nodegroup_map_len); + if (g_printer == NULL) return false; BackupRestore* restore = new BackupRestore(opt_nodegroup_map, @@ -341,7 +421,8 @@ o verify nodegroup mapping ga_nParallelism); if (restore == NULL) { - delete printer; + delete g_printer; + g_printer = NULL; return false; } @@ -349,22 +430,22 @@ o verify nodegroup mapping { ga_print = true; ga_restore = true; - printer->m_print = true; + g_printer->m_print = true; } if (_print_meta) { ga_print = true; - printer->m_print_meta = true; + g_printer->m_print_meta = true; } if (_print_data) { ga_print = true; - printer->m_print_data = true; + g_printer->m_print_data = true; } if (_print_log) { ga_print = true; - printer->m_print_log = true; + g_printer->m_print_log = true; } if (_restore_data) @@ -384,25 +465,75 @@ o verify nodegroup mapping restore->m_no_restore_disk = true; } + if (ga_no_upgrade) + { + restore->m_no_upgrade = true; + } + if (ga_restore_epoch) { restore->m_restore_epoch = true; } { - BackupConsumer * c = printer; + BackupConsumer * c = g_printer; g_consumers.push_back(c); } { BackupConsumer * c = restore; g_consumers.push_back(c); } - // Set backup file path - if (*pargv[0] != NULL) + for (;;) { - ga_backupPath = *pargv[0]; + int i= 0; + if (ga_backupPath == default_backupPath) + { + // Set backup file path + if ((*pargv)[i] == NULL) + break; + ga_backupPath = (*pargv)[i++]; + } + if ((*pargv)[i] == NULL) + break; + g_databases.push_back((*pargv)[i++]); + while ((*pargv)[i] != NULL) + { + g_tables.push_back((*pargv)[i++]); + } + break; } + info.setLevel(254); info << "backup path = " << ga_backupPath << endl; + if (g_databases.size() > 0) + { + info << "Restoring only from database " << g_databases[0].c_str() << endl; + if (g_tables.size() > 0) + info << "Restoring only tables:"; + for (unsigned i= 0; i < g_tables.size(); i++) + { + info << " " << g_tables[i].c_str(); + } + if (g_tables.size() > 0) + info << endl; + } + /* + the below formatting follows the formatting from mysqldump + do not change unless to adapt to changes in mysqldump + */ + g_ndbrecord_print_format.fields_enclosed_by= + opt_fields_enclosed_by ? opt_fields_enclosed_by : ""; + g_ndbrecord_print_format.fields_terminated_by= + opt_fields_terminated_by ? opt_fields_terminated_by : "\t"; + g_ndbrecord_print_format.fields_optionally_enclosed_by= + opt_fields_optionally_enclosed_by ? opt_fields_optionally_enclosed_by : ""; + g_ndbrecord_print_format.lines_terminated_by= + opt_lines_terminated_by ?
opt_lines_terminated_by : "\n"; + if (g_ndbrecord_print_format.fields_optionally_enclosed_by[0] == '\0') + g_ndbrecord_print_format.null_string= "\\N"; + else + g_ndbrecord_print_format.null_string= ""; + g_ndbrecord_print_format.hex_prefix= ""; + g_ndbrecord_print_format.hex_format= opt_hex_format; return true; } @@ -427,6 +558,70 @@ checkSysTable(const RestoreMetaData& metaData, uint i) return checkSysTable(metaData[i]); } +static inline bool +isBlobTable(const TableS* table) +{ + return table->getMainTable() != NULL; +} + +static inline bool +isIndex(const TableS* table) +{ + const NdbTableImpl & tmptab = NdbTableImpl::getImpl(* table->m_dictTable); + return (int) tmptab.m_indexType != (int) NdbDictionary::Index::Undefined; +} + +static inline bool +checkDbAndTableName(const TableS* table) +{ + if (g_tables.size() == 0 && + g_databases.size() == 0) + return true; + if (g_databases.size() == 0) + g_databases.push_back("TEST_DB"); + + // Filter on the main table name for indexes and blobs + const char *table_name; + if (isBlobTable(table)) + table_name= table->getMainTable()->getTableName(); + else if (isIndex(table)) + table_name= + NdbTableImpl::getImpl(*table->m_dictTable).m_primaryTable.c_str(); + else + table_name= table->getTableName(); + + unsigned i; + for (i= 0; i < g_databases.size(); i++) + { + if (strncmp(table_name, g_databases[i].c_str(), + g_databases[i].length()) == 0 && + table_name[g_databases[i].length()] == '/') + { + // we have a match + if (g_databases.size() > 1 || g_tables.size() == 0) + return true; + break; + } + } + if (i == g_databases.size()) + return false; // no match found + + while (*table_name != '/') table_name++; + table_name++; + while (*table_name != '/') table_name++; + table_name++; + + for (i= 0; i < g_tables.size(); i++) + { + if (strcmp(table_name, g_tables[i].c_str()) == 0) + { + // we have a match + return true; + } + } + return false; +} + static void free_data_callback() { @@ -459,6 +654,10 @@ main(int argc, char** argv) g_options.appfmt(" -n %d", ga_nodeId); if (_restore_meta) g_options.appfmt(" -m"); + if (ga_no_upgrade) + g_options.appfmt(" -u"); + if (ga_skip_table_check) + g_options.appfmt(" -s"); if (_restore_data) g_options.appfmt(" -r"); if (ga_restore_epoch) @@ -468,7 +667,6 @@ main(int argc, char** argv) g_options.appfmt(" -p %d", ga_nParallelism); g_connect_string = opt_connect_str; - /** * we must always load meta data, even if we will only print it to stdout */ @@ -484,9 +682,10 @@ main(int argc, char** argv) const Uint32 version = tmp.NdbVersion; char buf[NDB_VERSION_STRING_BUF_SZ]; + info.setLevel(254); info << "Ndb version in backup files: " - << getVersionString(version, 0, buf, sizeof(buf)) << endl; - + << ndbGetVersionString(version, 0, buf, sizeof(buf)) << endl; + /** * check whether we can restore the backup (right version).
*/ @@ -495,9 +694,9 @@ main(int argc, char** argv) if (version >= MAKE_VERSION(5,1,3) && version <= MAKE_VERSION(5,1,9)) { err << "Restore program incompatible with backup versions between " - << getVersionString(MAKE_VERSION(5,1,3), 0, buf, sizeof(buf)) + << ndbGetVersionString(MAKE_VERSION(5,1,3), 0, buf, sizeof(buf)) << " and " - << getVersionString(MAKE_VERSION(5,1,9), 0, buf, sizeof(buf)) + << ndbGetVersionString(MAKE_VERSION(5,1,9), 0, buf, sizeof(buf)) << endl; exitHandler(NDBT_FAILED); } @@ -554,27 +753,59 @@ main(int argc, char** argv) exitHandler(NDBT_FAILED); } } - debug << "Restoring tables" << endl; + + Vector<OutputStream *> table_output(metaData.getNoOfTables()); + debug << "Restoring tables" << endl; for(i = 0; i<metaData.getNoOfTables(); i++) { - if (checkSysTable(metaData, i)) + const TableS *table= metaData[i]; + table_output.push_back(NULL); + if (!checkDbAndTableName(table)) + continue; + if (checkSysTable(table)) { + if (!tab_path || isBlobTable(table) || isIndex(table)) + { + table_output[i]= ndbout.m_out; + } + else + { + FILE* res; + char filename[FN_REFLEN], tmp_path[FN_REFLEN]; + const char *table_name; + table_name= table->getTableName(); + while (*table_name != '/') table_name++; + table_name++; + while (*table_name != '/') table_name++; + table_name++; + convert_dirname(tmp_path, tab_path, NullS); + res= my_fopen(fn_format(filename, table_name, tmp_path, ".txt", 4), + opt_append ? + O_WRONLY|O_APPEND|O_CREAT : + O_WRONLY|O_TRUNC|O_CREAT, + MYF(MY_WME)); + if (res == 0) + { + exitHandler(NDBT_FAILED); + } + FileOutputStream *f= new FileOutputStream(res); + table_output[i]= f; + } for(Uint32 j= 0; j < g_consumers.size(); j++) - if (!g_consumers[j]->table(* metaData[i])) + if (!g_consumers[j]->table(* table)) { - err << "Restore: Failed to restore table: "; - err << metaData[i]->getTableName() << " ... Exiting " << endl; + err << "Restore: Failed to restore table: `"; + err << table->getTableName() << "` ... Exiting " << endl; exitHandler(NDBT_FAILED); } } else { for(Uint32 j= 0; j < g_consumers.size(); j++) - if (!g_consumers[j]->createSystable(* metaData[i])) + if (!g_consumers[j]->createSystable(* table)) { err << "Restore: Failed to restore system table: "; - err << metaData[i]->getTableName() << " ... Exiting " << endl; + err << table->getTableName() << " ... Exiting " << endl; exitHandler(NDBT_FAILED); } - } } debug << "Close tables" << endl; @@ -589,6 +820,20 @@ main(int argc, char** argv) { if(_restore_data || _print_data) { + if (!ga_skip_table_check){ + for(i=0; i < metaData.getNoOfTables(); i++){ + if (checkSysTable(metaData, i)) + { + for(Uint32 j= 0; j < g_consumers.size(); j++) + if (!g_consumers[j]->table_equal(* metaData[i])) + { + err << "Restore: Failed to restore data, "; + err << metaData[i]->getTableName() << " table structure doesn't match backup ... 
Exiting " << endl; + exitHandler(NDBT_FAILED); + } + } + } + } RestoreDataIterator dataIter(metaData, &free_data_callback); // Read data file header @@ -604,9 +849,15 @@ main(int argc, char** argv) const TupleS* tuple; while ((tuple = dataIter.getNextTuple(res= 1)) != 0) { - if (checkSysTable(tuple->getTable())) - for(Uint32 i= 0; i < g_consumers.size(); i++) - g_consumers[i]->tuple(* tuple, fragmentId); + const TableS* table = tuple->getTable(); + OutputStream *output = table_output[table->getLocalId()]; + if (!output) + continue; + OutputStream *tmp = ndbout.m_out; + ndbout.m_out = output; + for(Uint32 j= 0; j < g_consumers.size(); j++) + g_consumers[j]->tuple(* tuple, fragmentId); + ndbout.m_out = tmp; } // while (tuple != NULL); if (res < 0) @@ -648,9 +899,12 @@ main(int argc, char** argv) const LogEntry * logEntry = 0; while ((logEntry = logIter.getNextLogEntry(res= 0)) != 0) { - if (checkSysTable(logEntry->m_table)) - for(Uint32 i= 0; i < g_consumers.size(); i++) - g_consumers[i]->logEntry(* logEntry); + const TableS* table = logEntry->m_table; + OutputStream *output = table_output[table->getLocalId()]; + if (!output) + continue; + for(Uint32 j= 0; j < g_consumers.size(); j++) + g_consumers[j]->logEntry(* logEntry); } if (res < 0) { @@ -667,33 +921,33 @@ main(int argc, char** argv) { for(i = 0; i<metaData.getNoOfTables(); i++) { - if (checkSysTable(metaData, i)) - { - for(Uint32 j= 0; j < g_consumers.size(); j++) - if (!g_consumers[j]->finalize_table(* metaData[i])) - { - err << "Restore: Failed to finalize restore table: %s. "; - err << "Exiting... " << metaData[i]->getTableName() << endl; - exitHandler(NDBT_FAILED); - } - } + const TableS* table = metaData[i]; + OutputStream *output = table_output[table->getLocalId()]; + if (!output) + continue; + for(Uint32 j= 0; j < g_consumers.size(); j++) + if (!g_consumers[j]->finalize_table(*table)) + { + err << "Restore: Failed to finalize restore table: %s. "; + err << "Exiting... 
" << metaData[i]->getTableName() << endl; + exitHandler(NDBT_FAILED); + } } } } - if (ga_restore_epoch) { for (i= 0; i < g_consumers.size(); i++) if (!g_consumers[i]->update_apply_status(metaData)) { - err << "Restore: Failed to restore epoch" << endl; - return -1; + err << "Restore: Failed to restore epoch" << endl; + return -1; } } - for(Uint32 i= 0; i < g_consumers.size(); i++) + for(Uint32 j= 0; j < g_consumers.size(); j++) { - if (g_consumers[i]->has_temp_error()) + if (g_consumers[j]->has_temp_error()) { clearConsumers(); ndbout_c("\nRestore successful, but encountered temporary error, " @@ -702,7 +956,23 @@ main(int argc, char** argv) } clearConsumers(); - return NDBT_ProgramExit(NDBT_OK); + + for(i = 0; i < metaData.getNoOfTables(); i++) + { + if (table_output[i] && + table_output[i] != ndbout.m_out) + { + my_fclose(((FileOutputStream *)table_output[i])->getFile(), MYF(MY_WME)); + delete table_output[i]; + table_output[i] = NULL; + } + } + + if (opt_verbose) + return NDBT_ProgramExit(NDBT_OK); + else + return 0; } // main template class Vector<BackupConsumer*>; +template class Vector<OutputStream*>; diff --git a/storage/ndb/tools/select_all.cpp b/storage/ndb/tools/select_all.cpp index 84187894fb1..23d5f95f3f7 100644 --- a/storage/ndb/tools/select_all.cpp +++ b/storage/ndb/tools/select_all.cpp @@ -39,7 +39,7 @@ NDB_STD_OPTS_VARS; static const char* _dbname = "TEST_DB"; static const char* _delimiter = "\t"; -static int _unqualified, _header, _parallelism, _useHexFormat, _lock, +static int _header, _parallelism, _useHexFormat, _lock, _order, _descending; const char *load_default_groups[]= { "mysql_cluster",0 }; @@ -54,48 +54,49 @@ static struct my_option my_long_options[] = { NDB_STD_OPTS("ndb_desc"), { "database", 'd', "Name of database table is in", - (gptr*) &_dbname, (gptr*) &_dbname, 0, + (uchar**) &_dbname, (uchar**) &_dbname, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { "parallelism", 'p', "parallelism", - (gptr*) &_parallelism, (gptr*) &_parallelism, 0, + (uchar**) &_parallelism, (uchar**) &_parallelism, 0, GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { "lock", 'l', "Read(0), Read-hold(1), Exclusive(2)", - (gptr*) &_lock, (gptr*) &_lock, 0, + (uchar**) &_lock, (uchar**) &_lock, 0, GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { "order", 'o', "Sort resultset according to index", - (gptr*) &_order, (gptr*) &_order, 0, + (uchar**) &_order, (uchar**) &_order, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "descending", 'z', "Sort descending (requires order flag)", - (gptr*) &_descending, (gptr*) &_descending, 0, + (uchar**) &_descending, (uchar**) &_descending, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "header", 'h', "Print header", - (gptr*) &_header, (gptr*) &_header, 0, + (uchar**) &_header, (uchar**) &_header, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0 }, { "useHexFormat", 'x', "Output numbers in hexadecimal format", - (gptr*) &_useHexFormat, (gptr*) &_useHexFormat, 0, + (uchar**) &_useHexFormat, (uchar**) &_useHexFormat, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "delimiter", 'D', "Column delimiter", - (gptr*) &_delimiter, (gptr*) &_delimiter, 0, + (uchar**) &_delimiter, (uchar**) &_delimiter, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { "disk", 256, "Dump disk ref", - (gptr*) &_dumpDisk, (gptr*) &_dumpDisk, 0, + (uchar**) &_dumpDisk, (uchar**) &_dumpDisk, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "rowid", 256, "Dump rowid", - (gptr*) &use_rowid, (gptr*) &use_rowid, 0, + (uchar**) &use_rowid, (uchar**) &use_rowid, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "gci", 256, "Dump gci", - 
(gptr*) &use_gci, (gptr*) &use_gci, 0, + (uchar**) &use_gci, (uchar**) &use_gci, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "tupscan", 't', "Scan in tup order", - (gptr*) &_tup, (gptr*) &_tup, 0, + (uchar**) &_tup, (uchar**) &_tup, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "nodata", 256, "Dont print data", - (gptr*) &nodata, (gptr*) &nodata, 0, + (uchar**) &nodata, (uchar**) &nodata, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} }; static void usage() { +#ifdef NOT_USED char desc[] = "tabname\n"\ "This program reads all records from one table in NDB Cluster\n"\ @@ -103,6 +104,7 @@ static void usage() "(It only print error messages if it encounters a permanent error.)\n"\ "It can also be used to dump the content of a table to file \n"\ " ex: select_all --no-header --delimiter=';' T4 > T4.data\n"; +#endif ndb_std_print_version(); print_defaults(MYSQL_CONFIG_NAME,load_default_groups); puts(""); @@ -127,6 +129,7 @@ int main(int argc, char** argv){ } Ndb_cluster_connection con(opt_connect_str); + con.set_name("ndb_select_all"); if(con.connect(12, 5, 1) != 0) { ndbout << "Unable to connect to management server." << endl; diff --git a/storage/ndb/tools/select_count.cpp b/storage/ndb/tools/select_count.cpp index a133f7967f8..73982e886b5 100644 --- a/storage/ndb/tools/select_count.cpp +++ b/storage/ndb/tools/select_count.cpp @@ -43,21 +43,23 @@ static struct my_option my_long_options[] = { NDB_STD_OPTS("ndb_desc"), { "database", 'd', "Name of database table is in", - (gptr*) &_dbname, (gptr*) &_dbname, 0, + (uchar**) &_dbname, (uchar**) &_dbname, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { "parallelism", 'p', "parallelism", - (gptr*) &_parallelism, (gptr*) &_parallelism, 0, + (uchar**) &_parallelism, (uchar**) &_parallelism, 0, GET_INT, REQUIRED_ARG, 240, 0, 0, 0, 0, 0 }, { "lock", 'l', "Read(0), Read-hold(1), Exclusive(2)", - (gptr*) &_lock, (gptr*) &_lock, 0, + (uchar**) &_lock, (uchar**) &_lock, 0, GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} }; static void usage() { +#ifdef NOT_USED char desc[] = "tabname1 ... tabnameN\n"\ "This program will count the number of records in tables\n"; +#endif ndb_std_print_version(); print_defaults(MYSQL_CONFIG_NAME,load_default_groups); puts(""); @@ -81,6 +83,7 @@ int main(int argc, char** argv){ } Ndb_cluster_connection con(opt_connect_str); + con.set_name("ndb_select_count"); if(con.connect(12, 5, 1) != 0) { ndbout << "Unable to connect to management server." 
<< endl; diff --git a/storage/ndb/tools/waiter.cpp b/storage/ndb/tools/waiter.cpp index de8d15ac17a..a292ab9140a 100644 --- a/storage/ndb/tools/waiter.cpp +++ b/storage/ndb/tools/waiter.cpp @@ -46,17 +46,17 @@ static struct my_option my_long_options[] = { NDB_STD_OPTS("ndb_desc"), { "no-contact", 'n', "Wait for cluster no contact", - (gptr*) &_no_contact, (gptr*) &_no_contact, 0, + (uchar**) &_no_contact, (uchar**) &_no_contact, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "not-started", OPT_WAIT_STATUS_NOT_STARTED, "Wait for cluster not started", - (gptr*) &_not_started, (gptr*) &_not_started, 0, + (uchar**) &_not_started, (uchar**) &_not_started, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "single-user", OPT_WAIT_STATUS_SINGLE_USER, "Wait for cluster to enter single user mode", - (gptr*) &_single_user, (gptr*) &_single_user, 0, + (uchar**) &_single_user, (uchar**) &_single_user, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "timeout", 't', "Timeout to wait", - (gptr*) &_timeout, (gptr*) &_timeout, 0, + (uchar**) &_timeout, (uchar**) &_timeout, 0, GET_INT, REQUIRED_ARG, 120, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} };
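
The tuple_a() change above stops trusting attr_data->size for variable-size columns and instead reads the stored length out of the row data itself: Varchar/Varbinary carry a one-byte length prefix, Longvarchar/Longvarbinary a two-byte little-endian prefix. A minimal standalone sketch of that decoding (the function name is illustrative, not part of the NDB API):

    #include <cstddef>

    /*
      Bytes occupied by a var-size attribute value, including its length
      prefix, mirroring the switch in BackupRestore::tuple_a() above.
      ShortVar  (Varchar, Varbinary):         1-byte prefix.
      MediumVar (Longvarchar, Longvarbinary): 2-byte little-endian prefix.
    */
    static size_t var_attr_stored_length(const unsigned char *src, bool medium_var)
    {
      if (medium_var)
        return (size_t)src[0] + ((size_t)src[1] << 8) + 2;
      return (size_t)src[0] + 1;
    }

For a Longvarchar holding 300 bytes, src[0] is 0x2C and src[1] is 0x01, so the function returns 302. This is also why the pre-5.1.0 upgrade loop in BackupRestore::table() maps Varchar to ArrayTypeShortVar and Longvarchar to ArrayTypeMediumVar unless --no-upgrade is given: the prefix width has to match the array type the kernel expects.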
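The new --fields-* and --lines-terminated-by options default to mysqldump's tab format, as the comment in the patch insists: fields separated by TAB, rows ended by '\n', and NULL written as \N whenever no optional field enclosure is configured. A sketch of a row printer under those defaults; the TextFormat struct is illustrative, not the NdbRecordPrintFormat API:

    #include <cstdio>

    struct TextFormat
    {
      const char *field_term;   /* "\t" by default, as in the patch */
      const char *line_term;    /* "\n" */
      const char *null_string;  /* "\\N" unless fields are optionally enclosed */
    };

    /* Print one row, substituting null_string for missing fields. */
    static void print_row(const TextFormat &f, const char *const *fields, int n)
    {
      for (int i = 0; i < n; i++)
      {
        if (i)
          fputs(f.field_term, stdout);
        fputs(fields[i] ? fields[i] : f.null_string, stdout);
      }
      fputs(f.line_term, stdout);
    }

With the defaults, a row { "1", NULL, "abc" } prints as 1<TAB>\N<TAB>abc<newline>, which LOAD DATA INFILE accepts without extra options.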
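checkDbAndTableName() and the --tab file-naming code above both skip past two '/' characters because NDB internal table names are qualified as database/schema/tablename; the BaseString::split() calls in the restore consumer expect exactly three components, and four for index names. A defensive sketch of the same pointer walk; unqualified_name() is a hypothetical helper, and unlike the patch it returns NULL on malformed input instead of walking off the end:

    #include <cstring>

    /* Return the unqualified table name inside "db/schema/table". */
    static const char *unqualified_name(const char *internal_name)
    {
      const char *p = strchr(internal_name, '/');   /* skip database */
      if (p == NULL)
        return NULL;
      p = strchr(p + 1, '/');                       /* skip schema */
      if (p == NULL)
        return NULL;
      return p + 1;
    }

    /* unqualified_name("TEST_DB/def/t1") yields "t1". */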
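Both the ArrayType upgrade (applied only to backups older than 5.1.0) and the rejected 5.1.3 to 5.1.9 range rely on MAKE_VERSION producing integers that compare in version order. A sketch assuming the usual NDB packing of (major << 16) | (minor << 8) | build; the real macro is supplied by the NDB version headers, not defined here:

    /* Assumed packing; the NDB headers provide the real MAKE_VERSION. */
    #define MAKE_VERSION(A, B, C) (((A) << 16) | ((B) << 8) | (C))

    /* The range refused by restore_main.cpp above. */
    static bool restorable_version(unsigned v)
    {
      return !(v >= MAKE_VERSION(5, 1, 3) && v <= MAKE_VERSION(5, 1, 9));
    }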
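Finally, the data loop in main() redirects output per table by saving ndbout.m_out, pointing it at the table's OutputStream from table_output[], feeding the tuple to every consumer, and restoring the old stream. The patch does this save/swap/restore inline; an RAII wrapper like the following (illustrative, not in the patch) expresses the same pattern and restores the stream even on early exit:

    struct OutputStream;  /* as declared in OutputStream.hpp */

    /* Temporarily repoint a stream slot; restores it on scope exit. */
    class StreamSwap
    {
      OutputStream *&slot_;
      OutputStream *saved_;
    public:
      StreamSwap(OutputStream *&slot, OutputStream *replacement)
        : slot_(slot), saved_(slot)
      { slot_ = replacement; }
      ~StreamSwap() { slot_ = saved_; }
    };

    /* Hypothetical use at the tuple loop:
       StreamSwap guard(ndbout.m_out, table_output[table->getLocalId()]); */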