| author    | unknown <baker@bk-internal.mysql.com> | 2006-11-28 22:22:52 +0100 |
|-----------|---------------------------------------|---------------------------|
| committer | unknown <baker@bk-internal.mysql.com> | 2006-11-28 22:22:52 +0100 |
| commit    | 19daf5d970740713b6f07d6a64d404f4d2ab8e7f (patch) | |
| tree      | 17cbf7ddda5df51cd125e1aa56feb16b130192da /storage | |
| parent    | dd7e49e0197f78051250ef1b4465c836dc068525 (diff) | |
| parent    | fe99bda872fd51b07e1ebb0dc506e26d195edec7 (diff) | |
| download  | mariadb-git-19daf5d970740713b6f07d6a64d404f4d2ab8e7f.tar.gz | |
Merge bk-internal.mysql.com:/data0/bk/mysql-5.1
into bk-internal.mysql.com:/data0/bk/mysql-5.1-arch

sql/field.cc:
  Auto merged
sql/mysql_priv.h:
  Auto merged
sql/set_var.cc:
  Auto merged
storage/federated/ha_federated.cc:
  Auto merged
storage/myisammrg/ha_myisammrg.cc:
  Auto merged
plugin/fulltext/plugin_example.c:
  e
  C
sql/field.h:
  e
Diffstat (limited to 'storage')
97 files changed, 469 insertions, 393 deletions
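
Most hunks in this merge follow a single cleanup pattern: DBUG_PRINT format strings whose conversion specifiers did not match the argument types (`%llu` for `ha_rows`, `%d` or `%lx` for pointers) are rewritten to `%lu` / `0x%lx` with an explicit cast on each argument. The sketch below illustrates that pattern in plain C; the typedef and function names are stand-ins for illustration only, not the MySQL sources.

```c
#include <stdio.h>

/* Stand-in typedefs for this sketch; the real code uses MySQL's
   ha_rows (a 64-bit row count) and ulong, and prints via DBUG_PRINT. */
typedef unsigned long long my_rows;
typedef unsigned long      my_ulong;

static void print_meta(my_rows rows, const void *share)
{
  /* Before the patch: specifiers like "%llu"/"%d" were paired with
     arguments of a different width, which is unreliable in varargs
     calls across platforms. */

  /* After the patch: every argument is cast so it matches its
     conversion specifier exactly ("%lu" <- ulong, "0x%lx" <- long). */
  printf("Rows %lu  share: 0x%lx\n", (my_ulong) rows, (long) share);
}

int main(void)
{
  my_rows rows = 42;
  print_meta(rows, &rows);
  return 0;
}
```

The same idea drives the `(long)` casts added in front of pointers printed with `0x%lx` throughout the heap, MyISAM and NDB debug output in the hunks below.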
diff --git a/storage/archive/ha_archive.cc b/storage/archive/ha_archive.cc index cb701b07ed7..821dd3db03b 100644 --- a/storage/archive/ha_archive.cc +++ b/storage/archive/ha_archive.cc @@ -315,14 +315,12 @@ int ha_archive::read_meta_file(File meta_file, ha_rows *rows, DBUG_PRINT("ha_archive::read_meta_file", ("Check %d", (uint)meta_buffer[0])); DBUG_PRINT("ha_archive::read_meta_file", ("Version %d", (uint)meta_buffer[1])); - DBUG_PRINT("ha_archive::read_meta_file", ("Rows %llu", - (long long unsigned)*rows)); - DBUG_PRINT("ha_archive::read_meta_file", ("Checkpoint %llu", - (long long unsigned) check_point)); - DBUG_PRINT("ha_archive::read_meta_file", ("Auto-Increment %llu", - (long long unsigned)*auto_increment)); - DBUG_PRINT("ha_archive::read_meta_file", ("Forced Flushes %llu", - (long long unsigned)*forced_flushes)); + DBUG_PRINT("ha_archive::read_meta_file", ("Rows %lu", (ulong) *rows)); + DBUG_PRINT("ha_archive::read_meta_file", ("Checkpoint %lu", (ulong) check_point)); + DBUG_PRINT("ha_archive::read_meta_file", ("Auto-Increment %lu", + (ulong) *auto_increment)); + DBUG_PRINT("ha_archive::read_meta_file", ("Forced Flushes %lu", + (ulong) *forced_flushes)); DBUG_PRINT("ha_archive::read_meta_file", ("Real Path %s", real_path)); DBUG_PRINT("ha_archive::read_meta_file", ("Dirty %d", (int)(*ptr))); @@ -375,14 +373,12 @@ int ha_archive::write_meta_file(File meta_file, ha_rows rows, (uint)ARCHIVE_CHECK_HEADER)); DBUG_PRINT("ha_archive::write_meta_file", ("Version %d", (uint)ARCHIVE_VERSION)); - DBUG_PRINT("ha_archive::write_meta_file", ("Rows %llu", - (unsigned long long)rows)); - DBUG_PRINT("ha_archive::write_meta_file", ("Checkpoint %llu", - (unsigned long long)check_point)); - DBUG_PRINT("ha_archive::write_meta_file", ("Auto Increment %llu", - (unsigned long long)auto_increment)); - DBUG_PRINT("ha_archive::write_meta_file", ("Forced Flushes %llu", - (unsigned long long)forced_flushes)); + DBUG_PRINT("ha_archive::write_meta_file", ("Rows %lu", (ulong) rows)); + DBUG_PRINT("ha_archive::write_meta_file", ("Checkpoint %lu", (ulong) check_point)); + DBUG_PRINT("ha_archive::write_meta_file", ("Auto Increment %lu", + (ulong) auto_increment)); + DBUG_PRINT("ha_archive::write_meta_file", ("Forced Flushes %lu", + (ulong) forced_flushes)); DBUG_PRINT("ha_archive::write_meta_file", ("Real path %s", real_path)); DBUG_PRINT("ha_archive::write_meta_file", ("Dirty %d", (uint)dirty)); @@ -765,9 +761,8 @@ int ha_archive::real_write_row(byte *buf, azio_stream *writer) DBUG_ENTER("ha_archive::real_write_row"); written= azwrite(writer, buf, table->s->reclength); - DBUG_PRINT("ha_archive::real_write_row", ("Wrote %d bytes expected %d", - (uint32)written, - (uint32)table->s->reclength)); + DBUG_PRINT("ha_archive::real_write_row", ("Wrote %d bytes expected %lu", + (int) written, table->s->reclength)); if (!delayed_insert || !bulk_insert) share->dirty= TRUE; @@ -810,10 +805,11 @@ int ha_archive::write_row(byte *buf) int rc; byte *read_buf= NULL; ulonglong temp_auto; + byte *record= table->record[0]; DBUG_ENTER("ha_archive::write_row"); if (share->crashed) - DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); + DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); ha_statistic_increment(&SSV::ha_write_count); if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT) @@ -876,7 +872,8 @@ int ha_archive::write_row(byte *buf) while (!(get_row(&archive, read_buf))) { - if (!memcmp(read_buf + mfield->offset(), table->next_number_field->ptr, + if (!memcmp(read_buf + mfield->offset(record), + table->next_number_field->ptr, mfield->max_length())) 
{ rc= HA_ERR_FOUND_DUPP_KEY; @@ -906,16 +903,8 @@ int ha_archive::write_row(byte *buf) */ for (Field **field=table->field ; *field ; field++) { - /* - Pack length will report 256 when you have 255 bytes - of data plus the single byte for length. - - Probably could have added a method to say the number - of bytes taken up by field for the length data. - */ - uint32 actual_length= (*field)->data_length() + - ((*field)->pack_length() > 256 ? 2 : 1); - + DBUG_PRINT("archive",("Pack is %d\n", (*field)->pack_length())); + DBUG_PRINT("archive",("MyPack is %d\n", (*field)->data_length((char*) buf + (*field)->offset(record)))); if ((*field)->real_type() == MYSQL_TYPE_VARCHAR) { char *ptr= (*field)->ptr + actual_length; @@ -1054,8 +1043,7 @@ int ha_archive::rnd_init(bool scan) if (scan) { scan_rows= share->rows_recorded; - DBUG_PRINT("info", ("archive will retrieve %llu rows", - (unsigned long long)scan_rows)); + DBUG_PRINT("info", ("archive will retrieve %lu rows", (ulong) scan_rows)); stats.records= 0; /* @@ -1097,8 +1085,8 @@ int ha_archive::get_row(azio_stream *file_to_read, byte *buf) DBUG_ENTER("ha_archive::get_row"); read= azread(file_to_read, buf, table->s->reclength); - DBUG_PRINT("ha_archive::get_row", ("Read %d bytes expected %lu", read, - (unsigned long)table->s->reclength)); + DBUG_PRINT("ha_archive::get_row", ("Read %d bytes expected %lu", (int) read, + table->s->reclength)); if (read == Z_STREAM_ERROR) DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); @@ -1307,7 +1295,8 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt) { Field *field= table->found_next_number_field; ulonglong auto_value= - (ulonglong) field->val_int((char*)(buf + field->offset())); + (ulonglong) field->val_int((char*)(buf + + field->offset(table->record[0]))); if (share->auto_increment_value < auto_value) stats.auto_increment_value= share->auto_increment_value= auto_value; @@ -1315,8 +1304,7 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt) share->rows_recorded++; } } - DBUG_PRINT("info", ("recovered %llu archive rows", - (unsigned long long)share->rows_recorded)); + DBUG_PRINT("info", ("recovered %lu archive rows", (ulong) share->rows_recorded)); my_free((char*)buf, MYF(0)); if (rc && rc != HA_ERR_END_OF_FILE) diff --git a/storage/federated/ha_federated.cc b/storage/federated/ha_federated.cc index dea1525b4ea..693544dde36 100644 --- a/storage/federated/ha_federated.cc +++ b/storage/federated/ha_federated.cc @@ -552,8 +552,8 @@ static int parse_url_error(FEDERATED_SHARE *share, TABLE *table, int error_num) if (share->scheme) { DBUG_PRINT("info", - ("error: parse_url. Returning error code %d \ - freeing share->scheme %lx", error_num, share->scheme)); + ("error: parse_url. 
Returning error code %d freeing share->scheme 0x%lx", + error_num, (long) share->scheme)); my_free((gptr) share->scheme, MYF(0)); share->scheme= 0; } @@ -619,7 +619,7 @@ static int parse_url(FEDERATED_SHARE *share, TABLE *table, MYF(0)); share->connect_string_length= table->s->connect_string.length; - DBUG_PRINT("info",("parse_url alloced share->scheme %lx", share->scheme)); + DBUG_PRINT("info",("parse_url alloced share->scheme 0x%lx", (long) share->scheme)); /* remove addition of null terminator and store length @@ -1742,7 +1742,7 @@ void ha_federated::update_auto_increment(void) thd->first_successful_insert_id_in_cur_stmt= mysql->last_used_con->insert_id; - DBUG_PRINT("info",("last_insert_id %d", stats.auto_increment_value)); + DBUG_PRINT("info",("last_insert_id: %ld", (long) stats.auto_increment_value)); DBUG_VOID_RETURN; } @@ -1848,6 +1848,7 @@ int ha_federated::update_row(const byte *old_data, byte *new_data) String where_string(where_buffer, sizeof(where_buffer), &my_charset_bin); + byte *record= table->record[0]; DBUG_ENTER("ha_federated::update_row"); /* set string lengths to 0 to avoid misc chars in string @@ -1906,7 +1907,7 @@ int ha_federated::update_row(const byte *old_data, byte *new_data) bool needs_quote= (*field)->str_needs_quotes(); where_string.append(STRING_WITH_LEN(" = ")); (*field)->val_str(&field_value, - (char*) (old_data + (*field)->offset())); + (char*) (old_data + (*field)->offset(record))); if (needs_quote) where_string.append('\''); field_value.print(&where_string); @@ -2014,8 +2015,8 @@ int ha_federated::delete_row(const byte *buf) stats.deleted+= (ha_rows)mysql->affected_rows; stats.records-= (ha_rows)mysql->affected_rows; DBUG_PRINT("info", - ("rows deleted %d rows deleted for all time %d", - int(mysql->affected_rows), stats.deleted)); + ("rows deleted %ld rows deleted for all time %ld", + (long) mysql->affected_rows, (long) stats.deleted)); DBUG_RETURN(0); } @@ -2148,7 +2149,7 @@ error: int ha_federated::index_init(uint keynr, bool sorted) { DBUG_ENTER("ha_federated::index_init"); - DBUG_PRINT("info", ("table: '%s' key: %u", table->s->table_name, keynr)); + DBUG_PRINT("info", ("table: '%s' key: %u", table->s->table_name.str, keynr)); active_index= keynr; DBUG_RETURN(0); } diff --git a/storage/heap/_check.c b/storage/heap/_check.c index cc832f8ed5b..c861fdb582f 100644 --- a/storage/heap/_check.c +++ b/storage/heap/_check.c @@ -88,7 +88,8 @@ int heap_check_heap(HP_INFO *info, my_bool print_status) if (records != share->records || deleted != share->deleted) { DBUG_PRINT("error",("Found rows: %lu (%lu) deleted %lu (%lu)", - records, share->records, deleted, share->deleted)); + records, (ulong) share->records, + deleted, (ulong) share->deleted)); error= 1; } *info= save_info; @@ -100,9 +101,9 @@ static int check_one_key(HP_KEYDEF *keydef, uint keynr, ulong records, ulong blength, my_bool print_status) { int error; - uint i,found,max_links,seek,links; - uint rec_link; /* Only used with debugging */ - uint hash_buckets_found; + ulong i,found,max_links,seek,links; + ulong rec_link; /* Only used with debugging */ + ulong hash_buckets_found; HASH_INFO *hash_info; error=0; @@ -123,7 +124,9 @@ static int check_one_key(HP_KEYDEF *keydef, uint keynr, ulong records, blength, records)) != i) { - DBUG_PRINT("error",("Record in wrong link: Link %d Record: 0x%lx Record-link %d", i,hash_info->ptr_to_rec,rec_link)); + DBUG_PRINT("error", + ("Record in wrong link: Link %lu Record: 0x%lx Record-link %lu", + i, (long) hash_info->ptr_to_rec, rec_link)); error=1; } else @@ -141,18 
+144,18 @@ static int check_one_key(HP_KEYDEF *keydef, uint keynr, ulong records, if (keydef->hash_buckets != hash_buckets_found) { DBUG_PRINT("error",("Found %ld buckets, stats shows %ld buckets", - hash_buckets_found, keydef->hash_buckets)); + hash_buckets_found, (long) keydef->hash_buckets)); error=1; } DBUG_PRINT("info", - ("records: %ld seeks: %d max links: %d hitrate: %.2f " - "buckets: %d", + ("records: %ld seeks: %lu max links: %lu hitrate: %.2f " + "buckets: %lu", records,seek,max_links, (float) seek / (float) (records ? records : 1), hash_buckets_found)); if (print_status) - printf("Key: %d records: %ld seeks: %d max links: %d " - "hitrate: %.2f buckets: %d\n", + printf("Key: %d records: %ld seeks: %lu max links: %lu " + "hitrate: %.2f buckets: %lu\n", keynr, records, seek, max_links, (float) seek / (float) (records ? records : 1), hash_buckets_found); @@ -180,8 +183,8 @@ static int check_one_rb_key(HP_INFO *info, uint keynr, ulong records, key_length, SEARCH_FIND | SEARCH_SAME, not_used)) { error= 1; - DBUG_PRINT("error",("Record in wrong link: key: %d Record: 0x%lx\n", - keynr, recpos)); + DBUG_PRINT("error",("Record in wrong link: key: %u Record: 0x%lx\n", + keynr, (long) recpos)); } else found++; diff --git a/storage/heap/hp_delete.c b/storage/heap/hp_delete.c index f18c5e7054c..2ef57624e77 100644 --- a/storage/heap/hp_delete.c +++ b/storage/heap/hp_delete.c @@ -24,7 +24,7 @@ int heap_delete(HP_INFO *info, const byte *record) HP_SHARE *share=info->s; HP_KEYDEF *keydef, *end, *p_lastinx; DBUG_ENTER("heap_delete"); - DBUG_PRINT("enter",("info: %lx record: 0x%lx",info,record)); + DBUG_PRINT("enter",("info: 0x%lx record: 0x%lx", (long) info, (long) record)); test_active(info); @@ -144,7 +144,7 @@ int hp_delete_key(HP_INFO *info, register HP_KEYDEF *keyinfo, info->current_hash_ptr=last_ptr; info->current_ptr = last_ptr ? 
last_ptr->ptr_to_rec : 0; DBUG_PRINT("info",("Corrected current_ptr to point at: 0x%lx", - info->current_ptr)); + (long) info->current_ptr)); } empty=pos; if (gpos) diff --git a/storage/heap/hp_hash.c b/storage/heap/hp_hash.c index 77f3cf6d80b..6a537906929 100644 --- a/storage/heap/hp_hash.c +++ b/storage/heap/hp_hash.c @@ -120,7 +120,7 @@ byte *hp_search(HP_INFO *info, HP_KEYDEF *keyinfo, const byte *key, { switch (nextflag) { case 0: /* Search after key */ - DBUG_PRINT("exit",("found key at %d",pos->ptr_to_rec)); + DBUG_PRINT("exit", ("found key at 0x%lx", (long) pos->ptr_to_rec)); info->current_hash_ptr=pos; DBUG_RETURN(info->current_ptr= pos->ptr_to_rec); case 1: /* Search next */ diff --git a/storage/heap/hp_open.c b/storage/heap/hp_open.c index fd937229b0d..f50478c8b3d 100644 --- a/storage/heap/hp_open.c +++ b/storage/heap/hp_open.c @@ -64,7 +64,8 @@ HP_INFO *heap_open(const char *name, int mode) info->opt_flag= READ_CHECK_USED; /* Check when changing */ #endif DBUG_PRINT("exit",("heap: 0x%lx reclength: %d records_in_block: %d", - info,share->reclength,share->block.records_in_block)); + (long) info, share->reclength, + share->block.records_in_block)); DBUG_RETURN(info); } @@ -82,7 +83,7 @@ HP_SHARE *hp_find_named_heap(const char *name) info= (HP_SHARE*) pos->data; if (!strcmp(name, info->name)) { - DBUG_PRINT("exit", ("Old heap_database: 0x%lx",info)); + DBUG_PRINT("exit", ("Old heap_database: 0x%lx", (long) info)); DBUG_RETURN(info); } } diff --git a/storage/heap/hp_rkey.c b/storage/heap/hp_rkey.c index f5f22a877a1..f02d44cc456 100644 --- a/storage/heap/hp_rkey.c +++ b/storage/heap/hp_rkey.c @@ -23,7 +23,7 @@ int heap_rkey(HP_INFO *info, byte *record, int inx, const byte *key, HP_SHARE *share= info->s; HP_KEYDEF *keyinfo= share->keydef + inx; DBUG_ENTER("heap_rkey"); - DBUG_PRINT("enter",("base: 0x%lx inx: %d",info,inx)); + DBUG_PRINT("enter",("info: 0x%lx inx: %d", (long) info, inx)); if ((uint) inx >= share->keys) { diff --git a/storage/heap/hp_rrnd.c b/storage/heap/hp_rrnd.c index 4daa3a06377..2f8556484a4 100644 --- a/storage/heap/hp_rrnd.c +++ b/storage/heap/hp_rrnd.c @@ -29,7 +29,7 @@ int heap_rrnd(register HP_INFO *info, byte *record, byte *pos) { HP_SHARE *share=info->s; DBUG_ENTER("heap_rrnd"); - DBUG_PRINT("enter",("info: 0x%lx pos: %lx",info,pos)); + DBUG_PRINT("enter",("info: 0x%lx pos: %lx",(long) info, (long) pos)); info->lastinx= -1; if (!(info->current_ptr= pos)) @@ -44,7 +44,7 @@ int heap_rrnd(register HP_INFO *info, byte *record, byte *pos) } info->update=HA_STATE_PREV_FOUND | HA_STATE_NEXT_FOUND | HA_STATE_AKTIV; memcpy(record,info->current_ptr,(size_t) share->reclength); - DBUG_PRINT("exit",("found record at 0x%lx",info->current_ptr)); + DBUG_PRINT("exit", ("found record at 0x%lx", (long) info->current_ptr)); info->current_hash_ptr=0; /* Can't use rnext */ DBUG_RETURN(0); } /* heap_rrnd */ diff --git a/storage/heap/hp_write.c b/storage/heap/hp_write.c index bc94e3bfae4..c83ae65c966 100644 --- a/storage/heap/hp_write.c +++ b/storage/heap/hp_write.c @@ -68,7 +68,7 @@ int heap_write(HP_INFO *info, const byte *record) DBUG_RETURN(0); err: - DBUG_PRINT("info",("Duplicate key: %d", keydef - share->keydef)); + DBUG_PRINT("info",("Duplicate key: %d", (int) (keydef - share->keydef))); info->errkey= keydef - share->keydef; if (keydef->algorithm == HA_KEY_ALG_BTREE) { @@ -138,7 +138,7 @@ static byte *next_free_record_pos(HP_SHARE *info) pos=info->del_link; info->del_link= *((byte**) pos); info->deleted--; - DBUG_PRINT("exit",("Used old position: 0x%lx",pos)); + 
DBUG_PRINT("exit",("Used old position: 0x%lx",(long) pos)); DBUG_RETURN(pos); } if (!(block_pos=(info->records % info->block.records_in_block))) @@ -153,9 +153,9 @@ static byte *next_free_record_pos(HP_SHARE *info) DBUG_RETURN(NULL); info->data_length+=length; } - DBUG_PRINT("exit",("Used new position: %lx", - (byte*) info->block.level_info[0].last_blocks+block_pos* - info->block.recbuffer)); + DBUG_PRINT("exit",("Used new position: 0x%lx", + (long) ((byte*) info->block.level_info[0].last_blocks+ + block_pos * info->block.recbuffer))); DBUG_RETURN((byte*) info->block.level_info[0].last_blocks+ block_pos*info->block.recbuffer); } diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 83266a22255..4ca767f811d 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -4445,7 +4445,7 @@ ha_innobase::rnd_pos( } if (error) { - DBUG_PRINT("error", ("Got error: %ld", error)); + DBUG_PRINT("error", ("Got error: %d", error)); DBUG_RETURN(error); } @@ -4455,7 +4455,7 @@ ha_innobase::rnd_pos( error = index_read(buf, pos, ref_length, HA_READ_KEY_EXACT); if (error) { - DBUG_PRINT("error", ("Got error: %ld", error)); + DBUG_PRINT("error", ("Got error: %d", error)); } change_active_index(keynr); @@ -6630,7 +6630,7 @@ innodb_mutex_show_status( mutex->count_spin_rounds, mutex->count_os_wait, mutex->count_os_yield, - (ulong) mutex->lspent_time/1000); + (ulong) (mutex->lspent_time/1000)); if (stat_print(thd, innobase_hton_name, hton_name_len, buf1, buf1len, @@ -6660,7 +6660,7 @@ innodb_mutex_show_status( rw_lock_count, rw_lock_count_spin_loop, rw_lock_count_spin_rounds, rw_lock_count_os_wait, rw_lock_count_os_yield, - (ulong) rw_lock_wait_time/1000); + (ulong) (rw_lock_wait_time/1000)); if (stat_print(thd, innobase_hton_name, hton_name_len, STRING_WITH_LEN("rw_lock_mutexes"), buf2, buf2len)) { diff --git a/storage/innobase/os/os0file.c b/storage/innobase/os/os0file.c index a4acb0cd485..c4d051ec771 100644 --- a/storage/innobase/os/os0file.c +++ b/storage/innobase/os/os0file.c @@ -1733,7 +1733,7 @@ os_file_set_size( } /* Print about progress for each 100 MB written */ - if ((current_size + n_bytes) / (ib_longlong)(100 * 1024 * 1024) + if ((ib_longlong) (current_size + n_bytes) / (ib_longlong)(100 * 1024 * 1024) != current_size / (ib_longlong)(100 * 1024 * 1024)) { fprintf(stderr, " %lu00", diff --git a/storage/myisam/ha_myisam.cc b/storage/myisam/ha_myisam.cc index 85968110a77..f407f62fa0c 100644 --- a/storage/myisam/ha_myisam.cc +++ b/storage/myisam/ha_myisam.cc @@ -609,7 +609,7 @@ int ha_myisam::repair(THD* thd, HA_CHECK_OPT *check_opt) { param.testflag&= ~T_RETRY_WITHOUT_QUICK; sql_print_information("Retrying repair of: '%s' without quick", - table->s->path); + table->s->path.str); continue; } param.testflag&= ~T_QUICK; @@ -617,7 +617,7 @@ int ha_myisam::repair(THD* thd, HA_CHECK_OPT *check_opt) { param.testflag= (param.testflag & ~T_REP_BY_SORT) | T_REP; sql_print_information("Retrying repair of: '%s' with keycache", - table->s->path); + table->s->path.str); continue; } break; @@ -629,7 +629,7 @@ int ha_myisam::repair(THD* thd, HA_CHECK_OPT *check_opt) sql_print_information("Found %s of %s rows when repairing '%s'", llstr(file->state->records, llbuff), llstr(start_records, llbuff2), - table->s->path); + table->s->path.str); } return error; } @@ -1157,7 +1157,7 @@ bool ha_myisam::check_and_repair(THD *thd) // Don't use quick if deleted rows if (!file->state->del && (myisam_recover_options & HA_RECOVER_QUICK)) 
check_opt.flags|=T_QUICK; - sql_print_warning("Checking table: '%s'",table->s->path); + sql_print_warning("Checking table: '%s'",table->s->path.str); old_query= thd->query; old_query_length= thd->query_length; @@ -1168,7 +1168,7 @@ bool ha_myisam::check_and_repair(THD *thd) if ((marked_crashed= mi_is_crashed(file)) || check(thd, &check_opt)) { - sql_print_warning("Recovering table: '%s'",table->s->path); + sql_print_warning("Recovering table: '%s'",table->s->path.str); check_opt.flags= ((myisam_recover_options & HA_RECOVER_BACKUP ? T_BACKUP_DATA : 0) | (marked_crashed ? 0 : T_QUICK) | @@ -1460,6 +1460,7 @@ int ha_myisam::create(const char *name, register TABLE *table_arg, bool found_real_auto_increment=0; enum ha_base_keytype type; char buff[FN_REFLEN]; + byte *record; KEY *pos; MI_KEYDEF *keydef; MI_COLUMNDEF *recinfo,*recinfo_pos; @@ -1564,6 +1565,7 @@ int ha_myisam::create(const char *name, register TABLE *table_arg, found_real_auto_increment= share->next_number_key_offset == 0; } + record= table_arg->record[0]; recpos=0; recinfo_pos=recinfo; while (recpos < (uint) share->reclength) { @@ -1573,7 +1575,7 @@ int ha_myisam::create(const char *name, register TABLE *table_arg, for (field=table_arg->field ; *field ; field++) { - if ((fieldpos=(*field)->offset()) >= recpos && + if ((fieldpos=(*field)->offset(record)) >= recpos && fieldpos <= minpos) { /* skip null fields */ @@ -1587,7 +1589,7 @@ int ha_myisam::create(const char *name, register TABLE *table_arg, } } DBUG_PRINT("loop",("found: 0x%lx recpos: %d minpos: %d length: %d", - found,recpos,minpos,length)); + (long) found, recpos, minpos, length)); if (recpos != minpos) { // Reserved space (Null bits?) bzero((char*) recinfo_pos,sizeof(*recinfo_pos)); diff --git a/storage/myisam/ha_myisam.h b/storage/myisam/ha_myisam.h index 7ad938c06a7..6e9108e8731 100644 --- a/storage/myisam/ha_myisam.h +++ b/storage/myisam/ha_myisam.h @@ -37,7 +37,7 @@ extern ulong myisam_recover_options; class ha_myisam: public handler { MI_INFO *file; - ulong int_table_flags; + ulonglong int_table_flags; char *data_file_name, *index_file_name; bool can_enable_indexes; int repair(THD *thd, MI_CHECK ¶m, bool optimize); diff --git a/storage/myisam/mi_close.c b/storage/myisam/mi_close.c index 9865ac72d62..0b7f98f4d06 100644 --- a/storage/myisam/mi_close.c +++ b/storage/myisam/mi_close.c @@ -28,8 +28,9 @@ int mi_close(register MI_INFO *info) int error=0,flag; MYISAM_SHARE *share=info->s; DBUG_ENTER("mi_close"); - DBUG_PRINT("enter",("base: %lx reopen: %u locks: %u", - info,(uint) share->reopen, (uint) share->tot_locks)); + DBUG_PRINT("enter",("base: 0x%lx reopen: %u locks: %u", + (long) info, (uint) share->reopen, + (uint) share->tot_locks)); pthread_mutex_lock(&THR_LOCK_myisam); if (info->lock_type == F_EXTRA_LCK) diff --git a/storage/myisam/mi_delete.c b/storage/myisam/mi_delete.c index 85cc60bdd9d..471420d99c0 100644 --- a/storage/myisam/mi_delete.c +++ b/storage/myisam/mi_delete.c @@ -165,7 +165,7 @@ static int _mi_ck_real_delete(register MI_INFO *info, MI_KEYDEF *keyinfo, DBUG_PRINT("error",("Couldn't allocate memory")); DBUG_RETURN(my_errno=ENOMEM); } - DBUG_PRINT("info",("root_page: %ld",old_root)); + DBUG_PRINT("info",("root_page: %ld", (long) old_root)); if (!_mi_fetch_keypage(info,keyinfo,old_root,DFLT_INIT_HITS,root_buff,0)) { error= -1; @@ -410,7 +410,7 @@ static int del(register MI_INFO *info, register MI_KEYDEF *keyinfo, uchar *key, MYISAM_SHARE *share=info->s; MI_KEY_PARAM s_temp; DBUG_ENTER("del"); - DBUG_PRINT("enter",("leaf_page: %ld keypos: 0x%lx", 
leaf_page, + DBUG_PRINT("enter",("leaf_page: %ld keypos: 0x%lx", (long) leaf_page, (ulong) keypos)); DBUG_DUMP("leaf_buff",(byte*) leaf_buff,mi_getint(leaf_buff)); @@ -597,7 +597,8 @@ static int underflow(register MI_INFO *info, register MI_KEYDEF *keyinfo, else { /* Page is full */ endpos=anc_buff+anc_length; - DBUG_PRINT("test",("anc_buff: %lx endpos: %lx",anc_buff,endpos)); + DBUG_PRINT("test",("anc_buff: 0x%lx endpos: 0x%lx", + (long) anc_buff, (long) endpos)); if (keypos != anc_buff+2+key_reflength && !_mi_get_last_key(info,keyinfo,anc_buff,anc_key,keypos,&length)) goto err; @@ -775,7 +776,7 @@ static uint remove_key(MI_KEYDEF *keyinfo, uint nod_flag, int s_length; uchar *start; DBUG_ENTER("remove_key"); - DBUG_PRINT("enter",("keypos: %lx page_end: %lx",keypos,page_end)); + DBUG_PRINT("enter",("keypos: 0x%lx page_end: 0x%lx",(long) keypos, (long) page_end)); start=keypos; if (!(keyinfo->flag & diff --git a/storage/myisam/mi_dynrec.c b/storage/myisam/mi_dynrec.c index 8d45333137e..5f0c26a8607 100644 --- a/storage/myisam/mi_dynrec.c +++ b/storage/myisam/mi_dynrec.c @@ -1240,8 +1240,8 @@ ulong _mi_rec_unpack(register MI_INFO *info, register byte *to, byte *from, err: my_errno= HA_ERR_WRONG_IN_RECORD; - DBUG_PRINT("error",("to_end: %lx -> %lx from_end: %lx -> %lx", - to,to_end,from,from_end)); + DBUG_PRINT("error",("to_end: 0x%lx -> 0x%lx from_end: 0x%lx -> 0x%lx", + (long) to, (long) to_end, (long) from, (long) from_end)); DBUG_DUMP("from",(byte*) info->rec_buff,info->s->base.min_pack_length); DBUG_RETURN(MY_FILE_ERROR); } /* _mi_rec_unpack */ diff --git a/storage/myisam/mi_keycache.c b/storage/myisam/mi_keycache.c index b4122089d1d..bf69a6233f9 100644 --- a/storage/myisam/mi_keycache.c +++ b/storage/myisam/mi_keycache.c @@ -54,8 +54,8 @@ int mi_assign_to_key_cache(MI_INFO *info, int error= 0; MYISAM_SHARE* share= info->s; DBUG_ENTER("mi_assign_to_key_cache"); - DBUG_PRINT("enter",("old_key_cache_handle: %lx new_key_cache_handle: %lx", - share->key_cache, key_cache)); + DBUG_PRINT("enter",("old_key_cache_handle: 0x%lx new_key_cache_handle: 0x%lx", + (long) share->key_cache, (long) key_cache)); /* Skip operation if we didn't change key cache. This can happen if we diff --git a/storage/myisam/mi_page.c b/storage/myisam/mi_page.c index a5e2b01ed0f..33921c09a68 100644 --- a/storage/myisam/mi_page.c +++ b/storage/myisam/mi_page.c @@ -27,7 +27,7 @@ uchar *_mi_fetch_keypage(register MI_INFO *info, MI_KEYDEF *keyinfo, uchar *tmp; uint page_size; DBUG_ENTER("_mi_fetch_keypage"); - DBUG_PRINT("enter",("page: %ld",page)); + DBUG_PRINT("enter",("page: %ld", (long) page)); tmp=(uchar*) key_cache_read(info->s->key_cache, info->s->kfile, page, level, (byte*) buff, @@ -80,7 +80,7 @@ int _mi_write_keypage(register MI_INFO *info, register MI_KEYDEF *keyinfo, my_errno=EINVAL; DBUG_RETURN((-1)); } - DBUG_PRINT("page",("write page at: %lu",(long) page,buff)); + DBUG_PRINT("page",("write page at: %lu",(long) page)); DBUG_DUMP("buff",(byte*) buff,mi_getint(buff)); #endif diff --git a/storage/myisam/mi_rsamepos.c b/storage/myisam/mi_rsamepos.c index c4bd5fa16fa..d2dba64b0fd 100644 --- a/storage/myisam/mi_rsamepos.c +++ b/storage/myisam/mi_rsamepos.c @@ -33,7 +33,8 @@ int mi_rsame_with_pos(MI_INFO *info, byte *record, int inx, my_off_t filepos) DBUG_ENTER("mi_rsame_with_pos"); DBUG_PRINT("enter",("index: %d filepos: %ld", inx, (long) filepos)); - if (inx < -1 || inx >= 0 && ! mi_is_key_active(info->s->state.key_map, inx)) + if (inx < -1 || + (inx >= 0 && ! 
mi_is_key_active(info->s->state.key_map, inx))) { DBUG_RETURN(my_errno=HA_ERR_WRONG_INDEX); } diff --git a/storage/myisam/mi_statrec.c b/storage/myisam/mi_statrec.c index 70e63ef8ce1..b3ebeb24bad 100644 --- a/storage/myisam/mi_statrec.c +++ b/storage/myisam/mi_statrec.c @@ -254,8 +254,8 @@ int _mi_read_rnd_static_record(MI_INFO *info, byte *buf, if (filepos >= info->state->data_file_length) { DBUG_PRINT("test",("filepos: %ld (%ld) records: %ld del: %ld", - filepos/share->base.reclength,filepos, - info->state->records, info->state->del)); + (long) filepos/share->base.reclength, (long) filepos, + (long) info->state->records, (long) info->state->del)); fast_mi_writeinfo(info); DBUG_RETURN(my_errno=HA_ERR_END_OF_FILE); } diff --git a/storage/myisam/mi_write.c b/storage/myisam/mi_write.c index 7080875009b..32f2aac6859 100644 --- a/storage/myisam/mi_write.c +++ b/storage/myisam/mi_write.c @@ -351,7 +351,7 @@ static int w_search(register MI_INFO *info, register MI_KEYDEF *keyinfo, my_bool was_last_key; my_off_t next_page, dupp_key_pos; DBUG_ENTER("w_search"); - DBUG_PRINT("enter",("page: %ld",page)); + DBUG_PRINT("enter",("page: %ld", (long) page)); search_key_length= (comp_flag & SEARCH_FIND) ? key_length : USE_WHOLE_KEY; if (!(temp_buff= (uchar*) my_alloca((uint) keyinfo->block_length+ @@ -474,7 +474,7 @@ int _mi_insert(register MI_INFO *info, register MI_KEYDEF *keyinfo, uchar *endpos, *prev_key; MI_KEY_PARAM s_temp; DBUG_ENTER("_mi_insert"); - DBUG_PRINT("enter",("key_pos: %lx",key_pos)); + DBUG_PRINT("enter",("key_pos: 0x%lx", (long) key_pos)); DBUG_EXECUTE("key",_mi_print_key(DBUG_FILE,keyinfo->seg,key,USE_WHOLE_KEY);); nod_flag=mi_test_if_nod(anc_buff); @@ -495,8 +495,8 @@ int _mi_insert(register MI_INFO *info, register MI_KEYDEF *keyinfo, { DBUG_PRINT("test",("t_length: %d ref_len: %d", t_length,s_temp.ref_length)); - DBUG_PRINT("test",("n_ref_len: %d n_length: %d key_pos: %lx", - s_temp.n_ref_length,s_temp.n_length,s_temp.key)); + DBUG_PRINT("test",("n_ref_len: %d n_length: %d key_pos: 0x%lx", + s_temp.n_ref_length,s_temp.n_length, (long) s_temp.key)); } #endif if (t_length > 0) @@ -689,7 +689,8 @@ uchar *_mi_find_half_pos(uint nod_flag, MI_KEYDEF *keyinfo, uchar *page, } while (page < end); *return_key_length=length; *after_key=page; - DBUG_PRINT("exit",("returns: %lx page: %lx half: %lx",lastpos,page,end)); + DBUG_PRINT("exit",("returns: 0x%lx page: 0x%lx half: 0x%lx", + (long) lastpos, (long) page, (long) end)); DBUG_RETURN(lastpos); } /* _mi_find_half_pos */ @@ -744,7 +745,8 @@ static uchar *_mi_find_last_pos(MI_KEYDEF *keyinfo, uchar *page, } *return_key_length=last_length; *after_key=lastpos; - DBUG_PRINT("exit",("returns: %lx page: %lx end: %lx",prevpos,page,end)); + DBUG_PRINT("exit",("returns: 0x%lx page: 0x%lx end: 0x%lx", + (long) prevpos,(long) page,(long) end)); DBUG_RETURN(prevpos); } /* _mi_find_last_pos */ @@ -780,7 +782,7 @@ static int _mi_balance_page(register MI_INFO *info, MI_KEYDEF *keyinfo, next_page= _mi_kpos(info->s->base.key_reflength, father_key_pos+father_keylength); buff=info->buff; - DBUG_PRINT("test",("use right page: %lu",next_page)); + DBUG_PRINT("test",("use right page: %lu", (ulong) next_page)); } else { @@ -789,7 +791,7 @@ static int _mi_balance_page(register MI_INFO *info, MI_KEYDEF *keyinfo, next_page= _mi_kpos(info->s->base.key_reflength,father_key_pos); /* Fix that curr_buff is to left */ buff=curr_buff; curr_buff=info->buff; - DBUG_PRINT("test",("use left page: %lu",next_page)); + DBUG_PRINT("test",("use left page: %lu", (ulong) next_page)); } /* 
father_key_pos ptr to parting key */ if (!_mi_fetch_keypage(info,keyinfo,next_page,DFLT_INIT_HITS,info->buff,0)) diff --git a/storage/myisam/myisampack.c b/storage/myisam/myisampack.c index be68ffbdc5a..98121cc6d90 100644 --- a/storage/myisam/myisampack.c +++ b/storage/myisam/myisampack.c @@ -1105,18 +1105,18 @@ static int get_statistic(PACK_MRG_INFO *mrg,HUFF_COUNTS *huff_counts) my_off_t total_count; char llbuf[32]; - DBUG_PRINT("info", ("column: %3u", count - huff_counts + 1)); + DBUG_PRINT("info", ("column: %3u", (uint) (count - huff_counts) + 1)); if (verbose >= 2) - VOID(printf("column: %3u\n", count - huff_counts + 1)); + VOID(printf("column: %3u\n", (uint) (count - huff_counts) + 1)); if (count->tree_buff) { DBUG_PRINT("info", ("number of distinct values: %u", - (count->tree_pos - count->tree_buff) / - count->field_length)); + (uint) ((count->tree_pos - count->tree_buff) / + count->field_length))); if (verbose >= 2) VOID(printf("number of distinct values: %u\n", - (count->tree_pos - count->tree_buff) / - count->field_length)); + (uint) ((count->tree_pos - count->tree_buff) / + count->field_length))); } total_count= 0; for (idx= 0; idx < 256; idx++) @@ -2036,7 +2036,7 @@ static void write_field_info(HUFF_COUNTS *counts, uint fields, uint trees) uint huff_tree_bits; huff_tree_bits=max_bit(trees ? trees-1 : 0); - DBUG_PRINT("info", ("")); + DBUG_PRINT("info", (" ")); DBUG_PRINT("info", ("column types:")); DBUG_PRINT("info", ("FIELD_NORMAL 0")); DBUG_PRINT("info", ("FIELD_SKIP_ENDSPACE 1")); @@ -2048,12 +2048,12 @@ static void write_field_info(HUFF_COUNTS *counts, uint fields, uint trees) DBUG_PRINT("info", ("FIELD_ZERO 7")); DBUG_PRINT("info", ("FIELD_VARCHAR 8")); DBUG_PRINT("info", ("FIELD_CHECK 9")); - DBUG_PRINT("info", ("")); + DBUG_PRINT("info", (" ")); DBUG_PRINT("info", ("pack type as a set of flags:")); DBUG_PRINT("info", ("PACK_TYPE_SELECTED 1")); DBUG_PRINT("info", ("PACK_TYPE_SPACE_FIELDS 2")); DBUG_PRINT("info", ("PACK_TYPE_ZERO_FILL 4")); - DBUG_PRINT("info", ("")); + DBUG_PRINT("info", (" ")); if (verbose >= 2) { VOID(printf("\n")); @@ -2126,7 +2126,7 @@ static my_off_t write_huff_tree(HUFF_TREE *huff_tree, uint trees) return 0; } - DBUG_PRINT("info", ("")); + DBUG_PRINT("info", (" ")); if (verbose >= 2) VOID(printf("\n")); tree_no= 0; @@ -2137,7 +2137,7 @@ static my_off_t write_huff_tree(HUFF_TREE *huff_tree, uint trees) if (huff_tree->tree_number == 0) continue; /* Deleted tree */ tree_no++; - DBUG_PRINT("info", ("")); + DBUG_PRINT("info", (" ")); if (verbose >= 3) VOID(printf("\n")); /* Count the total number of elements (byte codes or column values). 
*/ @@ -2279,8 +2279,8 @@ static my_off_t write_huff_tree(HUFF_TREE *huff_tree, uint trees) if (bits > 8 * sizeof(code)) { VOID(fflush(stdout)); - VOID(fprintf(stderr, "error: Huffman code too long: %u/%u\n", - bits, 8 * sizeof(code))); + VOID(fprintf(stderr, "error: Huffman code too long: %u/%lu\n", + bits, (ulong) (8 * sizeof(code)))); errors++; break; } @@ -2329,7 +2329,7 @@ static my_off_t write_huff_tree(HUFF_TREE *huff_tree, uint trees) } flush_bits(); } - DBUG_PRINT("info", ("")); + DBUG_PRINT("info", (" ")); if (verbose >= 2) VOID(printf("\n")); my_afree((gptr) packed_tree); @@ -2507,7 +2507,7 @@ static int compress_isam_file(PACK_MRG_INFO *mrg, HUFF_COUNTS *huff_counts) end_pos-=count->max_zero_fill; field_length-=count->max_zero_fill; - switch(count->field_type) { + switch (count->field_type) { case FIELD_SKIP_ZERO: if (!memcmp((byte*) start_pos,zero_string,field_length)) { diff --git a/storage/myisammrg/ha_myisammrg.cc b/storage/myisammrg/ha_myisammrg.cc index 70b8131f002..9637173a3ad 100644 --- a/storage/myisammrg/ha_myisammrg.cc +++ b/storage/myisammrg/ha_myisammrg.cc @@ -92,7 +92,7 @@ int ha_myisammrg::open(const char *name, int mode, uint test_if_locked) if (table->s->reclength != stats.mean_rec_length && stats.mean_rec_length) { - DBUG_PRINT("error",("reclength: %d mean_rec_length: %d", + DBUG_PRINT("error",("reclength: %lu mean_rec_length: %lu", table->s->reclength, stats.mean_rec_length)); goto err; } diff --git a/storage/myisammrg/myrg_extra.c b/storage/myisammrg/myrg_extra.c index ef7eeb9d4d9..2d6f9423de9 100644 --- a/storage/myisammrg/myrg_extra.c +++ b/storage/myisammrg/myrg_extra.c @@ -28,7 +28,7 @@ int myrg_extra(MYRG_INFO *info,enum ha_extra_function function, int error,save_error=0; MYRG_TABLE *file; DBUG_ENTER("myrg_extra"); - DBUG_PRINT("info",("function: %d",(ulong) function)); + DBUG_PRINT("info",("function: %lu", (ulong) function)); if (function == HA_EXTRA_CACHE) { diff --git a/storage/myisammrg/myrg_rkey.c b/storage/myisammrg/myrg_rkey.c index f87b264081e..8d3c0a4699a 100644 --- a/storage/myisammrg/myrg_rkey.c +++ b/storage/myisammrg/myrg_rkey.c @@ -87,8 +87,8 @@ int myrg_rkey(MYRG_INFO *info,byte *buf,int inx, const byte *key, mi=(info->current_table=(MYRG_TABLE *)queue_top(&(info->by_key)))->table; mi->once_flags|= RRND_PRESERVE_LASTINX; - DBUG_PRINT("info", ("using table no: %d", - info->current_table - info->open_tables + 1)); + DBUG_PRINT("info", ("using table no: %u", + (uint) (info->current_table - info->open_tables) + 1)); DBUG_DUMP("result key", (byte*) mi->lastkey, mi->lastkey_length); DBUG_RETURN(_myrg_mi_read_record(mi,buf)); } diff --git a/storage/ndb/include/kernel/signaldata/DictTabInfo.hpp b/storage/ndb/include/kernel/signaldata/DictTabInfo.hpp index 86186929394..6e76840fc5f 100644 --- a/storage/ndb/include/kernel/signaldata/DictTabInfo.hpp +++ b/storage/ndb/include/kernel/signaldata/DictTabInfo.hpp @@ -47,17 +47,17 @@ inline int my_decimal_get_binary_size(uint precision, uint scale) #endif #define DTIMAP(x, y, z) \ - { DictTabInfo::y, offsetof(x, z), SimpleProperties::Uint32Value, 0, (~0), 0 } + { DictTabInfo::y, my_offsetof(x, z), SimpleProperties::Uint32Value, 0, (~0), 0 } #define DTIMAP2(x, y, z, u, v) \ - { DictTabInfo::y, offsetof(x, z), SimpleProperties::Uint32Value, u, v, 0 } + { DictTabInfo::y, my_offsetof(x, z), SimpleProperties::Uint32Value, u, v, 0 } #define DTIMAPS(x, y, z, u, v) \ - { DictTabInfo::y, offsetof(x, z), SimpleProperties::StringValue, u, v, 0 } + { DictTabInfo::y, my_offsetof(x, z), SimpleProperties::StringValue, u, 
v, 0 } #define DTIMAPB(x, y, z, u, v, l) \ - { DictTabInfo::y, offsetof(x, z), SimpleProperties::BinaryValue, u, v, \ - offsetof(x, l) } + { DictTabInfo::y, my_offsetof(x, z), SimpleProperties::BinaryValue, u, v, \ + my_offsetof(x, l) } #define DTIBREAK(x) \ { DictTabInfo::x, 0, SimpleProperties::InvalidValue, 0, 0, 0 } @@ -602,17 +602,17 @@ public: }; #define DFGIMAP(x, y, z) \ - { DictFilegroupInfo::y, offsetof(x, z), SimpleProperties::Uint32Value, 0, (~0), 0 } + { DictFilegroupInfo::y, my_offsetof(x, z), SimpleProperties::Uint32Value, 0, (~0), 0 } #define DFGIMAP2(x, y, z, u, v) \ - { DictFilegroupInfo::y, offsetof(x, z), SimpleProperties::Uint32Value, u, v, 0 } + { DictFilegroupInfo::y, my_offsetof(x, z), SimpleProperties::Uint32Value, u, v, 0 } #define DFGIMAPS(x, y, z, u, v) \ - { DictFilegroupInfo::y, offsetof(x, z), SimpleProperties::StringValue, u, v, 0 } + { DictFilegroupInfo::y, my_offsetof(x, z), SimpleProperties::StringValue, u, v, 0 } #define DFGIMAPB(x, y, z, u, v, l) \ - { DictFilegroupInfo::y, offsetof(x, z), SimpleProperties::BinaryValue, u, v, \ - offsetof(x, l) } + { DictFilegroupInfo::y, my_offsetof(x, z), SimpleProperties::BinaryValue, u, v, \ + my_offsetof(x, l) } #define DFGIBREAK(x) \ { DictFilegroupInfo::x, 0, SimpleProperties::InvalidValue, 0, 0, 0 } diff --git a/storage/ndb/include/logger/LogHandler.hpp b/storage/ndb/include/logger/LogHandler.hpp index 8b9aa43d7a9..efb87bb3104 100644 --- a/storage/ndb/include/logger/LogHandler.hpp +++ b/storage/ndb/include/logger/LogHandler.hpp @@ -135,7 +135,7 @@ public: * * @param str the error string. */ - void setErrorStr(char* str); + void setErrorStr(const char* str); /** * Parse logstring parameters diff --git a/storage/ndb/include/ndb_global.h.in b/storage/ndb/include/ndb_global.h.in index a427e5c820d..24e75f964a0 100644 --- a/storage/ndb/include/ndb_global.h.in +++ b/storage/ndb/include/ndb_global.h.in @@ -133,6 +133,12 @@ extern "C" { #define PATH_MAX 1024 #endif +#if defined(_lint) || defined(FORCE_INIT_OF_VARS) +#define LINT_SET_PTR = {0,0} +#else +#define LINT_SET_PTR +#endif + #ifndef MIN #define MIN(x,y) (((x)<(y))?(x):(y)) #endif diff --git a/storage/ndb/include/util/NdbOut.hpp b/storage/ndb/include/util/NdbOut.hpp index d85d5cc6305..911777be07d 100644 --- a/storage/ndb/include/util/NdbOut.hpp +++ b/storage/ndb/include/util/NdbOut.hpp @@ -106,7 +106,7 @@ inline NdbOut& dec(NdbOut& _NdbOut) { return _NdbOut.setHexFormat(0); } extern "C" -void ndbout_c(const char * fmt, ...); +void ndbout_c(const char * fmt, ...) 
ATTRIBUTE_FORMAT(printf, 1, 2); class FilteredNdbOut : public NdbOut { public: diff --git a/storage/ndb/include/util/SimpleProperties.hpp b/storage/ndb/include/util/SimpleProperties.hpp index bae91108518..f199790f416 100644 --- a/storage/ndb/include/util/SimpleProperties.hpp +++ b/storage/ndb/include/util/SimpleProperties.hpp @@ -167,13 +167,13 @@ public: class Writer { public: Writer() {} - virtual ~Writer() {} bool first(); bool add(Uint16 key, Uint32 value); bool add(Uint16 key, const char * value); bool add(Uint16 key, const void* value, int len); protected: + virtual ~Writer() {} virtual bool reset() = 0; virtual bool putWord(Uint32 val) = 0; virtual bool putWords(const Uint32 * src, Uint32 len) = 0; @@ -247,7 +247,6 @@ public: class SectionSegmentPool &); virtual ~SimplePropertiesSectionReader() {} - virtual void reset(); virtual bool step(Uint32 len); virtual bool getWord(Uint32 * dst); diff --git a/storage/ndb/src/common/debugger/EventLogger.cpp b/storage/ndb/src/common/debugger/EventLogger.cpp index f1c6084754f..ca1d2381693 100644 --- a/storage/ndb/src/common/debugger/EventLogger.cpp +++ b/storage/ndb/src/common/debugger/EventLogger.cpp @@ -115,8 +115,7 @@ void getTextNDBStopForced(QQQQ) { int sphase = theData[4]; int extra = theData[5]; getRestartAction(theData[1],action_str); - if (signal) - reason_str.appfmt(" Initiated by signal %d.", signum); + reason_str.appfmt(" Initiated by signal %d.", signum); if (error) { ndbd_exit_classification cl; diff --git a/storage/ndb/src/common/debugger/signaldata/BackupSignalData.cpp b/storage/ndb/src/common/debugger/signaldata/BackupSignalData.cpp index 27fed22ac72..7410db44aa3 100644 --- a/storage/ndb/src/common/debugger/signaldata/BackupSignalData.cpp +++ b/storage/ndb/src/common/debugger/signaldata/BackupSignalData.cpp @@ -124,6 +124,9 @@ printABORT_BACKUP_ORD(FILE * out, const Uint32 * data, Uint32 len, Uint16 b){ sig->backupPtr, sig->backupId); return true; break; + case AbortBackupOrd::AbortScan: + case AbortBackupOrd::IncompatibleVersions: + return false; } return false; } diff --git a/storage/ndb/src/common/logger/LogHandler.cpp b/storage/ndb/src/common/logger/LogHandler.cpp index c11f962d4fb..47333f81812 100644 --- a/storage/ndb/src/common/logger/LogHandler.cpp +++ b/storage/ndb/src/common/logger/LogHandler.cpp @@ -164,9 +164,9 @@ LogHandler::getErrorStr() } void -LogHandler::setErrorStr(char* str) +LogHandler::setErrorStr(const char* str) { - m_errorStr= str; + m_errorStr= (char*) str; } bool diff --git a/storage/ndb/src/common/portlib/NdbMutex.c b/storage/ndb/src/common/portlib/NdbMutex.c index 4a170d87e5c..f0a1614ba8e 100644 --- a/storage/ndb/src/common/portlib/NdbMutex.c +++ b/storage/ndb/src/common/portlib/NdbMutex.c @@ -28,7 +28,7 @@ NdbMutex* NdbMutex_Create(void) DBUG_ENTER("NdbMutex_Create"); pNdbMutex = (NdbMutex*)NdbMem_Allocate(sizeof(NdbMutex)); - DBUG_PRINT("info",("NdbMem_Allocate 0x%lx",pNdbMutex)); + DBUG_PRINT("info",("NdbMem_Allocate 0x%lx", (long) pNdbMutex)); if (pNdbMutex == NULL) DBUG_RETURN(NULL); @@ -50,7 +50,7 @@ int NdbMutex_Destroy(NdbMutex* p_mutex) result = pthread_mutex_destroy(p_mutex); - DBUG_PRINT("info",("NdbMem_Free 0x%lx",p_mutex)); + DBUG_PRINT("info",("NdbMem_Free 0x%lx", (long) p_mutex)); NdbMem_Free(p_mutex); DBUG_RETURN(result); diff --git a/storage/ndb/src/common/portlib/NdbThread.c b/storage/ndb/src/common/portlib/NdbThread.c index b91e9c6a5b3..d6eea08b9f0 100644 --- a/storage/ndb/src/common/portlib/NdbThread.c +++ b/storage/ndb/src/common/portlib/NdbThread.c @@ -142,7 +142,7 @@ struct 
NdbThread* NdbThread_Create(NDB_THREAD_FUNC *p_thread_func, } pthread_attr_destroy(&thread_attr); - DBUG_PRINT("exit",("ret: %lx", tmpThread)); + DBUG_PRINT("exit",("ret: 0x%lx", (long) tmpThread)); DBUG_RETURN(tmpThread); } @@ -151,7 +151,7 @@ void NdbThread_Destroy(struct NdbThread** p_thread) { DBUG_ENTER("NdbThread_Destroy"); if (*p_thread != NULL){ - DBUG_PRINT("enter",("*p_thread: %lx", * p_thread)); + DBUG_PRINT("enter",("*p_thread: 0x%lx", (long) *p_thread)); free(* p_thread); * p_thread = 0; } diff --git a/storage/ndb/src/common/transporter/Transporter.cpp b/storage/ndb/src/common/transporter/Transporter.cpp index 383456f1077..b2ee75e4754 100644 --- a/storage/ndb/src/common/transporter/Transporter.cpp +++ b/storage/ndb/src/common/transporter/Transporter.cpp @@ -39,8 +39,8 @@ Transporter::Transporter(TransporterRegistry &t_reg, int _byteorder, bool _compression, bool _checksum, bool _signalId) : m_s_port(s_port), remoteNodeId(rNodeId), localNodeId(lNodeId), - isServer(lNodeId==serverNodeId), isMgmConnection(_isMgmConnection), - m_packer(_signalId, _checksum), + isServer(lNodeId==serverNodeId), + m_packer(_signalId, _checksum), isMgmConnection(_isMgmConnection), m_type(_type), m_transporter_registry(t_reg) { diff --git a/storage/ndb/src/cw/cpcd/CPCD.hpp b/storage/ndb/src/cw/cpcd/CPCD.hpp index aecc43150c4..3a69a03aa3f 100644 --- a/storage/ndb/src/cw/cpcd/CPCD.hpp +++ b/storage/ndb/src/cw/cpcd/CPCD.hpp @@ -63,6 +63,7 @@ struct CPCEvent { struct EventSubscriber { virtual void report(const CPCEvent &) = 0; + virtual ~EventSubscriber() {} }; /** diff --git a/storage/ndb/src/kernel/blocks/backup/Backup.cpp b/storage/ndb/src/kernel/blocks/backup/Backup.cpp index d6b557424e7..819255a79f5 100644 --- a/storage/ndb/src/kernel/blocks/backup/Backup.cpp +++ b/storage/ndb/src/kernel/blocks/backup/Backup.cpp @@ -228,7 +228,7 @@ Backup::execCONTINUEB(Signal* signal) Uint32 tabPtr_I = Tdata2; Uint32 fragPtr_I = signal->theData[3]; - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, ptr_I); TablePtr tabPtr; ptr.p->tables.getPtr(tabPtr, tabPtr_I); @@ -239,7 +239,7 @@ Backup::execCONTINUEB(Signal* signal) FragmentPtr fragPtr; tabPtr.p->fragments.getPtr(fragPtr, fragPtr_I); - BackupFilePtr filePtr; + BackupFilePtr filePtr LINT_SET_PTR; ptr.p->files.getPtr(filePtr, ptr.p->ctlFilePtr); const Uint32 sz = sizeof(BackupFormat::CtlFile::FragmentInfo) >> 2; @@ -293,7 +293,7 @@ Backup::execCONTINUEB(Signal* signal) case BackupContinueB::BUFFER_UNDERFLOW: { jam(); - BackupFilePtr filePtr; + BackupFilePtr filePtr LINT_SET_PTR; c_backupFilePool.getPtr(filePtr, Tdata1); checkFile(signal, filePtr); return; @@ -302,7 +302,7 @@ Backup::execCONTINUEB(Signal* signal) case BackupContinueB::BUFFER_FULL_SCAN: { jam(); - BackupFilePtr filePtr; + BackupFilePtr filePtr LINT_SET_PTR; c_backupFilePool.getPtr(filePtr, Tdata1); checkScan(signal, filePtr); return; @@ -311,7 +311,7 @@ Backup::execCONTINUEB(Signal* signal) case BackupContinueB::BUFFER_FULL_FRAG_COMPLETE: { jam(); - BackupFilePtr filePtr; + BackupFilePtr filePtr LINT_SET_PTR; c_backupFilePool.getPtr(filePtr, Tdata1); fragmentCompleted(signal, filePtr); return; @@ -320,16 +320,16 @@ Backup::execCONTINUEB(Signal* signal) case BackupContinueB::BUFFER_FULL_META: { jam(); - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, Tdata1); - BackupFilePtr filePtr; + BackupFilePtr filePtr LINT_SET_PTR; ptr.p->files.getPtr(filePtr, ptr.p->ctlFilePtr); FsBuffer & buf = filePtr.p->operation.dataBuffer; 
if(buf.getFreeSize() + buf.getMinRead() < buf.getUsableSize()) { jam(); - TablePtr tabPtr; + TablePtr tabPtr LINT_SET_PTR; c_tablePool.getPtr(tabPtr, Tdata2); DEBUG_OUT("Backup - Buffer full - " << buf.getFreeSize() @@ -344,7 +344,7 @@ Backup::execCONTINUEB(Signal* signal) return; }//if - TablePtr tabPtr; + TablePtr tabPtr LINT_SET_PTR; c_tablePool.getPtr(tabPtr, Tdata2); GetTabInfoReq * req = (GetTabInfoReq *)signal->getDataPtrSend(); req->senderRef = reference(); @@ -416,7 +416,7 @@ Backup::execDUMP_STATE_ORD(Signal* signal) /** * Print records */ - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; for(c_backups.first(ptr); ptr.i != RNIL; c_backups.next(ptr)){ infoEvent("BackupRecord %d: BackupId: %d MasterRef: %x ClientRef: %x", ptr.i, ptr.p->backupId, ptr.p->masterRef, ptr.p->clientRef); @@ -870,6 +870,9 @@ Backup::checkNodeFail(Signal* signal, #endif Uint32 gsn, len, pos; + LINT_INIT(gsn); + LINT_INIT(len); + LINT_INIT(pos); ptr.p->nodes.bitANDC(mask); switch(ptr.p->masterData.gsn){ case GSN_DEFINE_BACKUP_REQ: @@ -1053,7 +1056,7 @@ Backup::execBACKUP_REQ(Signal* signal) void Backup::execUTIL_SEQUENCE_REF(Signal* signal) { - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; jamEntry(); UtilSequenceRef * utilRef = (UtilSequenceRef*)signal->getDataPtr(); ptr.i = utilRef->senderData; @@ -1107,7 +1110,7 @@ Backup::execUTIL_SEQUENCE_CONF(Signal* signal) return; } - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; ptr.i = conf->senderData; c_backupPool.getPtr(ptr); @@ -1148,7 +1151,7 @@ Backup::defineBackupMutex_locked(Signal* signal, Uint32 ptrI, Uint32 retVal){ jamEntry(); ndbrequire(retVal == 0); - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; ptr.i = ptrI; c_backupPool.getPtr(ptr); @@ -1169,7 +1172,7 @@ Backup::dictCommitTableMutex_locked(Signal* signal, Uint32 ptrI,Uint32 retVal) /** * We now have both the mutexes */ - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; ptr.i = ptrI; c_backupPool.getPtr(ptr); @@ -1274,7 +1277,7 @@ Backup::execDEFINE_BACKUP_REF(Signal* signal) //const Uint32 backupId = ref->backupId; const Uint32 nodeId = ref->nodeId; - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, ptrI); ptr.p->setErrorCode(ref->errorCode); @@ -1291,7 +1294,7 @@ Backup::execDEFINE_BACKUP_CONF(Signal* signal) //const Uint32 backupId = conf->backupId; const Uint32 nodeId = refToNode(signal->senderBlockRef()); - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, ptrI); if (ERROR_INSERTED(10024)) @@ -1469,7 +1472,7 @@ Backup::execCREATE_TRIG_CONF(Signal* signal) const TriggerEvent::Value type = conf->getTriggerEvent(); const Uint32 triggerId = conf->getTriggerId(); - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, ptrI); /** @@ -1498,7 +1501,7 @@ Backup::execCREATE_TRIG_REF(Signal* signal) const Uint32 ptrI = ref->getConnectionPtr(); const Uint32 tableId = ref->getTableId(); - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, ptrI); /** @@ -1613,7 +1616,7 @@ Backup::execSTART_BACKUP_REF(Signal* signal) //const Uint32 backupId = ref->backupId; const Uint32 nodeId = ref->nodeId; - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, ptrI); ptr.p->setErrorCode(ref->errorCode); @@ -1630,7 +1633,7 @@ Backup::execSTART_BACKUP_CONF(Signal* signal) //const Uint32 backupId = conf->backupId; const Uint32 nodeId = refToNode(signal->senderBlockRef()); - BackupRecordPtr ptr; + BackupRecordPtr ptr 
LINT_SET_PTR; c_backupPool.getPtr(ptr, ptrI); startBackupReply(signal, ptr, nodeId); @@ -1682,7 +1685,7 @@ Backup::execWAIT_GCP_REF(Signal* signal) WaitGCPRef * ref = (WaitGCPRef*)signal->getDataPtr(); const Uint32 ptrI = ref->senderData; - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, ptrI); ndbrequire(ptr.p->masterRef == reference()); @@ -1706,7 +1709,7 @@ Backup::execWAIT_GCP_CONF(Signal* signal){ const Uint32 ptrI = conf->senderData; const Uint32 gcp = conf->gcp; - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, ptrI); ndbrequire(ptr.p->masterRef == reference()); @@ -1847,7 +1850,7 @@ Backup::execBACKUP_FRAGMENT_CONF(Signal* signal) const Uint64 noOfRecords = conf->noOfRecordsLow + (((Uint64)conf->noOfRecordsHigh) << 32); - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, ptrI); ptr.p->noOfBytes += noOfBytes; @@ -1921,7 +1924,7 @@ Backup::execBACKUP_FRAGMENT_REF(Signal* signal) //const Uint32 backupId = ref->backupId; const Uint32 nodeId = ref->nodeId; - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, ptrI); TablePtr tabPtr; @@ -1974,7 +1977,7 @@ Backup::execBACKUP_FRAGMENT_COMPLETE_REP(Signal* signal) BackupFragmentCompleteRep * rep = (BackupFragmentCompleteRep*)signal->getDataPtr(); - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, rep->backupPtr); TablePtr tabPtr; @@ -2026,21 +2029,23 @@ Backup::sendDropTrig(Signal* signal, BackupRecordPtr ptr) * Insert footers */ { - BackupFilePtr filePtr; + BackupFilePtr filePtr LINT_SET_PTR; ptr.p->files.getPtr(filePtr, ptr.p->logFilePtr); Uint32 * dst; + LINT_INIT(dst); ndbrequire(filePtr.p->operation.dataBuffer.getWritePtr(&dst, 1)); * dst = 0; filePtr.p->operation.dataBuffer.updateWritePtr(1); } { - BackupFilePtr filePtr; + BackupFilePtr filePtr LINT_SET_PTR; ptr.p->files.getPtr(filePtr, ptr.p->ctlFilePtr); const Uint32 gcpSz = sizeof(BackupFormat::CtlFile::GCPEntry) >> 2; Uint32 * dst; + LINT_INIT(dst); ndbrequire(filePtr.p->operation.dataBuffer.getWritePtr(&dst, gcpSz)); BackupFormat::CtlFile::GCPEntry * gcp = @@ -2110,10 +2115,10 @@ Backup::execDROP_TRIG_REF(Signal* signal) DropTrigRef* ref = (DropTrigRef*)signal->getDataPtr(); const Uint32 ptrI = ref->getConnectionPtr(); - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, ptrI); - if(ref->getConf()->getTriggerId() != -1) + if(ref->getConf()->getTriggerId() != ~(Uint32) 0) { ndbout << "ERROR DROPPING TRIGGER: " << ref->getConf()->getTriggerId(); ndbout << " Err: " << (Uint32)ref->getErrorCode() << endl << endl; @@ -2131,7 +2136,7 @@ Backup::execDROP_TRIG_CONF(Signal* signal) const Uint32 ptrI = conf->getConnectionPtr(); const Uint32 triggerId= conf->getTriggerId(); - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, ptrI); dropTrigReply(signal, ptr); @@ -2170,7 +2175,7 @@ Backup::execSTOP_BACKUP_REF(Signal* signal) //const Uint32 backupId = ref->backupId; const Uint32 nodeId = ref->nodeId; - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, ptrI); ptr.p->setErrorCode(ref->errorCode); @@ -2205,7 +2210,7 @@ Backup::execSTOP_BACKUP_CONF(Signal* signal) //const Uint32 backupId = conf->backupId; const Uint32 nodeId = refToNode(signal->senderBlockRef()); - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, ptrI); ptr.p->noOfLogBytes += conf->noOfLogBytes; @@ -2383,7 +2388,7 @@ 
Backup::defineBackupRef(Signal* signal, BackupRecordPtr ptr, Uint32 errCode) { jam(); - BackupFilePtr filePtr; + BackupFilePtr filePtr LINT_SET_PTR; ptr.p->files.getPtr(filePtr, ptr.p->ctlFilePtr); if (filePtr.p->m_flags & BackupFile::BF_LCP_META) { @@ -2435,7 +2440,7 @@ Backup::execDEFINE_BACKUP_REQ(Signal* signal) DefineBackupReq* req = (DefineBackupReq*)signal->getDataPtr(); - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; const Uint32 ptrI = req->backupPtr; const Uint32 backupId = req->backupId; const BlockReference senderRef = req->senderRef; @@ -2639,7 +2644,7 @@ Backup::execLIST_TABLES_CONF(Signal* signal) ListTablesConf* conf = (ListTablesConf*)signal->getDataPtr(); - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, conf->senderData); const Uint32 len = signal->length() - ListTablesConf::HeaderLength; @@ -2694,7 +2699,7 @@ Backup::openFiles(Signal* signal, BackupRecordPtr ptr) { jam(); - BackupFilePtr filePtr; + BackupFilePtr filePtr LINT_SET_PTR; FsOpenReq * req = (FsOpenReq *)signal->getDataPtrSend(); req->userReference = reference(); @@ -2756,10 +2761,10 @@ Backup::execFSOPENREF(Signal* signal) const Uint32 userPtr = ref->userPointer; - BackupFilePtr filePtr; + BackupFilePtr filePtr LINT_SET_PTR; c_backupFilePool.getPtr(filePtr, userPtr); - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, filePtr.p->backupPtr); ptr.p->setErrorCode(ref->errorCode); openFilesReply(signal, ptr, filePtr); @@ -2775,11 +2780,11 @@ Backup::execFSOPENCONF(Signal* signal) const Uint32 userPtr = conf->userPointer; const Uint32 filePointer = conf->filePointer; - BackupFilePtr filePtr; + BackupFilePtr filePtr LINT_SET_PTR; c_backupFilePool.getPtr(filePtr, userPtr); filePtr.p->filePointer = filePointer; - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, filePtr.p->backupPtr); ndbrequire(! 
(filePtr.p->m_flags & BackupFile::BF_OPEN)); @@ -2960,10 +2965,10 @@ Backup::execGET_TABINFOREF(Signal* signal) GetTabInfoRef * ref = (GetTabInfoRef*)signal->getDataPtr(); const Uint32 senderData = ref->senderData; - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, senderData); - BackupFilePtr filePtr; + BackupFilePtr filePtr LINT_SET_PTR; ptr.p->files.getPtr(filePtr, ptr.p->ctlFilePtr); filePtr.p->m_flags &= ~(Uint32)BackupFile::BF_FILE_THREAD; @@ -2987,7 +2992,7 @@ Backup::execGET_TABINFO_CONF(Signal* signal) const Uint32 tableType = conf->tableType; const Uint32 tableId = conf->tableId; - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, senderData); SegmentedSectionPtr dictTabInfoPtr; @@ -2997,7 +3002,7 @@ Backup::execGET_TABINFO_CONF(Signal* signal) TablePtr tabPtr ; ndbrequire(findTable(ptr, tabPtr, tableId)); - BackupFilePtr filePtr; + BackupFilePtr filePtr LINT_SET_PTR; ptr.p->files.getPtr(filePtr, ptr.p->ctlFilePtr); FsBuffer & buf = filePtr.p->operation.dataBuffer; Uint32* dst = 0; @@ -3263,7 +3268,7 @@ Backup::execDI_FCOUNTCONF(Signal* signal) ndbrequire(userPtr == RNIL && signal->length() == 5); - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, senderData); TablePtr tabPtr; @@ -3278,7 +3283,7 @@ Backup::execDI_FCOUNTCONF(Signal* signal) fragPtr.p->scanning = 0; fragPtr.p->tableId = tableId; fragPtr.p->fragmentId = i; - fragPtr.p->node = RNIL; + fragPtr.p->node = 0; }//for /** @@ -3343,7 +3348,7 @@ Backup::execDIGETPRIMCONF(Signal* signal) ndbrequire(userPtr == RNIL && signal->length() == 9); ndbrequire(nodeCount > 0 && nodeCount <= MAX_REPLICAS); - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, senderData); TablePtr tabPtr; @@ -3385,7 +3390,7 @@ Backup::execSTART_BACKUP_REQ(Signal* signal) StartBackupReq* req = (StartBackupReq*)signal->getDataPtr(); const Uint32 ptrI = req->backupPtr; - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, ptrI); ptr.p->slaveState.setState(STARTED); @@ -3438,7 +3443,7 @@ Backup::execBACKUP_FRAGMENT_REQ(Signal* signal) /** * Get backup record */ - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, ptrI); ptr.p->slaveState.setState(SCANNING); @@ -3447,7 +3452,7 @@ Backup::execBACKUP_FRAGMENT_REQ(Signal* signal) /** * Get file */ - BackupFilePtr filePtr; + BackupFilePtr filePtr LINT_SET_PTR; c_backupFilePool.getPtr(filePtr, ptr.p->dataFilePtr); ndbrequire(filePtr.p->backupPtr == ptrI); @@ -3591,12 +3596,12 @@ Backup::execTRANSID_AI(Signal* signal) //const Uint32 transId2 = signal->theData[2]; const Uint32 dataLen = signal->length() - 3; - BackupFilePtr filePtr; + BackupFilePtr filePtr LINT_SET_PTR; c_backupFilePool.getPtr(filePtr, filePtrI); OperationRecord & op = filePtr.p->operation; - TablePtr tabPtr; + TablePtr tabPtr LINT_SET_PTR; c_tablePool.getPtr(tabPtr, op.tablePtr); Table & table = * tabPtr.p; @@ -3782,7 +3787,7 @@ Backup::execSCAN_FRAGREF(Signal* signal) ScanFragRef * ref = (ScanFragRef*)signal->getDataPtr(); const Uint32 filePtrI = ref->senderData; - BackupFilePtr filePtr; + BackupFilePtr filePtr LINT_SET_PTR; c_backupFilePool.getPtr(filePtr, filePtrI); filePtr.p->errorCode = ref->errorCode; @@ -3801,7 +3806,7 @@ Backup::execSCAN_FRAGCONF(Signal* signal) ScanFragConf * conf = (ScanFragConf*)signal->getDataPtr(); const Uint32 filePtrI = conf->senderData; - BackupFilePtr filePtr; + BackupFilePtr filePtr LINT_SET_PTR; 
c_backupFilePool.getPtr(filePtr, filePtrI); OperationRecord & op = filePtr.p->operation; @@ -3842,7 +3847,7 @@ Backup::fragmentCompleted(Signal* signal, BackupFilePtr filePtr) filePtr.p->m_flags &= ~(Uint32)BackupFile::BF_SCAN_THREAD; - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, filePtr.p->backupPtr); if (ptr.p->is_lcp()) @@ -3873,7 +3878,7 @@ Backup::fragmentCompleted(Signal* signal, BackupFilePtr filePtr) void Backup::backupFragmentRef(Signal * signal, BackupFilePtr filePtr) { - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, filePtr.p->backupPtr); ptr.p->m_gsn = GSN_BACKUP_FRAGMENT_REF; @@ -3929,7 +3934,7 @@ Backup::checkScan(Signal* signal, BackupFilePtr filePtr) sendSignalWithDelay(DBLQH_REF, GSN_SCAN_NEXTREQ, signal, 10000, ScanFragNextReq::SignalLength); - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, filePtr.p->backupPtr); AbortBackupOrd *ord = (AbortBackupOrd*)signal->getDataPtrSend(); ord->backupId = ptr.p->backupId; @@ -3960,7 +3965,7 @@ Backup::execFSAPPENDREF(Signal* signal) const Uint32 filePtrI = ref->userPointer; const Uint32 errCode = ref->errorCode; - BackupFilePtr filePtr; + BackupFilePtr filePtr LINT_SET_PTR; c_backupFilePool.getPtr(filePtr, filePtrI); filePtr.p->m_flags &= ~(Uint32)BackupFile::BF_FILE_THREAD; @@ -3980,7 +3985,7 @@ Backup::execFSAPPENDCONF(Signal* signal) const Uint32 filePtrI = signal->theData[0]; //conf->userPointer; const Uint32 bytes = signal->theData[1]; //conf->bytes; - BackupFilePtr filePtr; + BackupFilePtr filePtr LINT_SET_PTR; c_backupFilePool.getPtr(filePtr, filePtrI); OperationRecord & op = filePtr.p->operation; @@ -4100,7 +4105,7 @@ Backup::checkFile(Signal* signal, BackupFilePtr filePtr) ndbrequire(flags & BackupFile::BF_OPEN); ndbrequire(flags & BackupFile::BF_FILE_THREAD); - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, filePtr.p->backupPtr); closeFile(signal, ptr, filePtr); } @@ -4117,8 +4122,8 @@ Backup::execBACKUP_TRIG_REQ(Signal* signal) /* TUP asks if this trigger is to be fired on this node. */ - TriggerPtr trigPtr; - TablePtr tabPtr; + TriggerPtr trigPtr LINT_SET_PTR; + TablePtr tabPtr LINT_SET_PTR; FragmentPtr fragPtr; Uint32 trigger_id = signal->theData[0]; Uint32 frag_id = signal->theData[1]; @@ -4149,7 +4154,7 @@ Backup::execTRIG_ATTRINFO(Signal* signal) { TrigAttrInfo * trg = (TrigAttrInfo*)signal->getDataPtr(); - TriggerPtr trigPtr; + TriggerPtr trigPtr LINT_SET_PTR; c_triggerPool.getPtr(trigPtr, trg->getTriggerId()); ndbrequire(trigPtr.p->event != ILLEGAL_TRIGGER_ID); // Online... 
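A second pattern worth calling out, visible a little further down in the DbaccMain.cpp, DblqhMain.cpp and DbtcMain.cpp hunks, is the addition of explicit (Uint32) and (Operation_t) casts inside conditional expressions. When the two branches of ?: mix an enumerator with a plain integer, the compiler has to convert one of them implicitly, and some of the compilers this tree is built with warn about the enum/int or signed/unsigned mix; casting the enumerator makes both branches the same type. A short sketch follows; the Operationrec and OP_* names mirror identifiers from the hunks, but their values and the surrounding code are invented for the example.

#include <cstdio>

typedef unsigned int Uint32;

/* Invented flag values standing in for the real Operationrec constants. */
struct Operationrec {
  enum OpBits {
    OP_LOCK_MODE     = 1 << 0,
    OP_ACC_LOCK_MODE = 1 << 1,
    OP_DIRTY_READ    = 1 << 2
  };
};

static Uint32 buildOpBits(Uint32 reqInfo, bool dirtyRead) {
  Uint32 opbits = 0;

  /* Without the (Uint32) cast each ?: mixes an enumerator with the literal 0,
     which some compilers flag as an enum/int or signed/unsigned mismatch.
     Casting the enumerator gives both branches the same unsigned type. */
  opbits |= ((reqInfo >> 4) & 0x3) ? (Uint32) Operationrec::OP_LOCK_MODE : 0;
  opbits |= ((reqInfo >> 4) & 0x3) ? (Uint32) Operationrec::OP_ACC_LOCK_MODE : 0;
  opbits |= dirtyRead ? (Uint32) Operationrec::OP_DIRTY_READ : 0;
  return opbits;
}

int main() {
  std::printf("opbits = 0x%x\n", buildOpBits(0x30, true));
  return 0;
}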
@@ -4180,7 +4185,7 @@ Backup::execTRIG_ATTRINFO(Signal* signal) { jam(); Uint32 save[TrigAttrInfo::StaticLength]; memcpy(save, signal->getDataPtr(), 4*TrigAttrInfo::StaticLength); - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, trigPtr.p->backupPtr); trigPtr.p->errorCode = AbortBackupOrd::LogBufferFull; AbortBackupOrd *ord = (AbortBackupOrd*)signal->getDataPtrSend(); @@ -4233,7 +4238,7 @@ Backup::execFIRE_TRIG_ORD(Signal* signal) const Uint32 trI = trg->getTriggerId(); const Uint32 fragId = trg->fragId; - TriggerPtr trigPtr; + TriggerPtr trigPtr LINT_SET_PTR; c_triggerPool.getPtr(trigPtr, trI); ndbrequire(trigPtr.p->event != ILLEGAL_TRIGGER_ID); @@ -4247,7 +4252,7 @@ Backup::execFIRE_TRIG_ORD(Signal* signal) Uint32 len = trigPtr.p->logEntry->Length; trigPtr.p->logEntry->FragId = htonl(fragId); - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, trigPtr.p->backupPtr); if(gci != ptr.p->currGCP) { @@ -4317,7 +4322,7 @@ Backup::execSTOP_BACKUP_REQ(Signal* signal) /** * Get backup record */ - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, ptrI); ptr.p->slaveState.setState(STOPPING); @@ -4412,10 +4417,10 @@ Backup::execFSCLOSEREF(Signal* signal) FsRef * ref = (FsRef*)signal->getDataPtr(); const Uint32 filePtrI = ref->userPointer; - BackupFilePtr filePtr; + BackupFilePtr filePtr LINT_SET_PTR; c_backupFilePool.getPtr(filePtr, filePtrI); - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, filePtr.p->backupPtr); FsConf * conf = (FsConf*)signal->getDataPtr(); @@ -4432,7 +4437,7 @@ Backup::execFSCLOSECONF(Signal* signal) FsConf * conf = (FsConf*)signal->getDataPtr(); const Uint32 filePtrI = conf->userPointer; - BackupFilePtr filePtr; + BackupFilePtr filePtr LINT_SET_PTR; c_backupFilePool.getPtr(filePtr, filePtrI); #ifdef DEBUG_ABORT @@ -4446,7 +4451,7 @@ Backup::execFSCLOSECONF(Signal* signal) filePtr.p->m_flags &= ~(Uint32)(BackupFile::BF_OPEN |BackupFile::BF_CLOSING); filePtr.p->operation.dataBuffer.reset(); - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, filePtr.p->backupPtr); closeFiles(signal, ptr); } @@ -4468,7 +4473,7 @@ Backup::closeFilesDone(Signal* signal, BackupRecordPtr ptr) conf->backupId = ptr.p->backupId; conf->backupPtr = ptr.i; - BackupFilePtr filePtr; + BackupFilePtr filePtr LINT_SET_PTR; if(ptr.p->logFilePtr != RNIL) { ptr.p->files.getPtr(filePtr, ptr.p->logFilePtr); @@ -4517,7 +4522,7 @@ Backup::execABORT_BACKUP_ORD(Signal* signal) dumpUsedResources(); #endif - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; if(requestType == AbortBackupOrd::ClientAbort) { if (getOwnNodeId() != getMasterNodeId()) { jam(); @@ -4637,7 +4642,7 @@ Backup::dumpUsedResources() jam(); for(Uint32 j = 0; j<3; j++) { jam(); - TriggerPtr trigPtr; + TriggerPtr trigPtr LINT_SET_PTR; if(tabPtr.p->triggerAllocated[j]) { jam(); c_triggerPool.getPtr(trigPtr, tabPtr.p->triggerIds[j]); @@ -4672,7 +4677,7 @@ Backup::cleanup(Signal* signal, BackupRecordPtr ptr) tabPtr.p->fragments.release(); for(Uint32 j = 0; j<3; j++) { jam(); - TriggerPtr trigPtr; + TriggerPtr trigPtr LINT_SET_PTR; if(tabPtr.p->triggerAllocated[j]) { jam(); c_triggerPool.getPtr(trigPtr, tabPtr.p->triggerIds[j]); @@ -4748,7 +4753,7 @@ Backup::execFSREMOVECONF(Signal* signal){ /** * Get backup record */ - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, ptrI); c_backups.release(ptr); } @@ -4762,7 +4767,7 @@ Backup::execLCP_PREPARE_REQ(Signal* 
signal) jamEntry(); LcpPrepareReq req = *(LcpPrepareReq*)signal->getDataPtr(); - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, req.backupPtr); ptr.p->m_gsn = GSN_LCP_PREPARE_REQ; @@ -4822,7 +4827,7 @@ Backup::lcp_close_file_conf(Signal* signal, BackupRecordPtr ptr) ndbrequire(ptr.p->tables.first(tabPtr)); Uint32 tableId = tabPtr.p->tableId; - BackupFilePtr filePtr; + BackupFilePtr filePtr LINT_SET_PTR; c_backupFilePool.getPtr(filePtr, ptr.p->dataFilePtr); ndbrequire(filePtr.p->m_flags == 0); @@ -4878,7 +4883,7 @@ Backup::lcp_open_file(Signal* signal, BackupRecordPtr ptr) /** * Lcp file */ - BackupFilePtr filePtr; + BackupFilePtr filePtr LINT_SET_PTR; c_backupFilePool.getPtr(filePtr, ptr.p->dataFilePtr); ndbrequire(filePtr.p->m_flags == 0); filePtr.p->m_flags |= BackupFile::BF_OPENING; @@ -4901,7 +4906,7 @@ Backup::lcp_open_file_done(Signal* signal, BackupRecordPtr ptr) ndbrequire(ptr.p->tables.first(tabPtr)); tabPtr.p->fragments.getPtr(fragPtr, 0); - BackupFilePtr filePtr; + BackupFilePtr filePtr LINT_SET_PTR; c_backupFilePool.getPtr(filePtr, ptr.p->dataFilePtr); ndbrequire(filePtr.p->m_flags == (BackupFile::BF_OPEN | BackupFile::BF_LCP_META)); @@ -4933,11 +4938,11 @@ Backup::execEND_LCPREQ(Signal* signal) { EndLcpReq* req= (EndLcpReq*)signal->getDataPtr(); - BackupRecordPtr ptr; + BackupRecordPtr ptr LINT_SET_PTR; c_backupPool.getPtr(ptr, req->backupPtr); ndbrequire(ptr.p->backupId == req->backupId); - BackupFilePtr filePtr; + BackupFilePtr filePtr LINT_SET_PTR; ptr.p->files.getPtr(filePtr, ptr.p->ctlFilePtr); ndbrequire(filePtr.p->m_flags == 0); diff --git a/storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp b/storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp index d5578a5c0c0..8a994db4fbc 100644 --- a/storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp +++ b/storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp @@ -971,10 +971,10 @@ void Dbacc::initOpRec(Signal* signal) Uint32 opbits = 0; opbits |= Treqinfo & 0x7; - opbits |= ((Treqinfo >> 4) & 0x3) ? Operationrec::OP_LOCK_MODE : 0; - opbits |= ((Treqinfo >> 4) & 0x3) ? Operationrec::OP_ACC_LOCK_MODE : 0; - opbits |= (dirtyReadFlag) ? Operationrec::OP_DIRTY_READ : 0; - opbits |= ((Treqinfo >> 31) & 0x1) ? Operationrec::OP_LOCK_REQ : 0; + opbits |= ((Treqinfo >> 4) & 0x3) ? (Uint32) Operationrec::OP_LOCK_MODE : 0; + opbits |= ((Treqinfo >> 4) & 0x3) ? (Uint32) Operationrec::OP_ACC_LOCK_MODE : 0; + opbits |= (dirtyReadFlag) ? (Uint32) Operationrec::OP_DIRTY_READ : 0; + opbits |= ((Treqinfo >> 31) & 0x1) ? (Uint32) Operationrec::OP_LOCK_REQ : 0; //operationRecPtr.p->nodeType = (Treqinfo >> 7) & 0x3; operationRecPtr.p->fid = fragrecptr.p->myfid; @@ -6947,10 +6947,10 @@ void Dbacc::initScanOpRec(Signal* signal) Uint32 opbits = 0; opbits |= ZSCAN_OP; - opbits |= scanPtr.p->scanLockMode ? Operationrec::OP_LOCK_MODE : 0; - opbits |= scanPtr.p->scanLockMode ? Operationrec::OP_ACC_LOCK_MODE : 0; - opbits |= scanPtr.p->scanReadCommittedFlag ? - Operationrec::OP_EXECUTED_DIRTY_READ : 0; + opbits |= scanPtr.p->scanLockMode ? (Uint32) Operationrec::OP_LOCK_MODE : 0; + opbits |= scanPtr.p->scanLockMode ? (Uint32) Operationrec::OP_ACC_LOCK_MODE : 0; + opbits |= (scanPtr.p->scanReadCommittedFlag ? 
+ (Uint32) Operationrec::OP_EXECUTED_DIRTY_READ : 0); opbits |= Operationrec::OP_COMMIT_DELETE_CHECK; operationRecPtr.p->userptr = RNIL; operationRecPtr.p->scanRecPtr = scanPtr.i; @@ -7700,6 +7700,7 @@ void Dbacc::putOverflowRecInFrag(Signal* signal) OverflowRecordPtr tpifPrevOverrecPtr; tpifNextOverrecPtr.i = fragrecptr.p->firstOverflowRec; + LINT_INIT(tpifPrevOverrecPtr.p); tpifPrevOverrecPtr.i = RNIL; while (tpifNextOverrecPtr.i != RNIL) { ptrCheckGuard(tpifNextOverrecPtr, coverflowrecsize, overflowRecord); @@ -7749,6 +7750,7 @@ void Dbacc::putRecInFreeOverdir(Signal* signal) OverflowRecordPtr tpfoPrevOverrecPtr; tpfoNextOverrecPtr.i = fragrecptr.p->firstFreeDirindexRec; + LINT_INIT(tpfoPrevOverrecPtr.p); tpfoPrevOverrecPtr.i = RNIL; while (tpfoNextOverrecPtr.i != RNIL) { ptrCheckGuard(tpfoNextOverrecPtr, coverflowrecsize, overflowRecord); diff --git a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp index a7865c356c8..1c305d74863 100644 --- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp +++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp @@ -189,7 +189,7 @@ struct { &Dbdict::drop_undofile_prepare_start, 0, 0, 0, 0, - 0, 0 + 0, 0, 0 } }; @@ -13602,8 +13602,8 @@ Dbdict::getDictLockType(Uint32 lockType) static const DictLockType lt[] = { { DictLockReq::NodeRestartLock, BS_NODE_RESTART, "NodeRestart" } }; - for (int i = 0; i < sizeof(lt)/sizeof(lt[0]); i++) { - if (lt[i].lockType == lockType) + for (unsigned int i = 0; i < sizeof(lt)/sizeof(lt[0]); i++) { + if ((Uint32) lt[i].lockType == lockType) return <[i]; } return NULL; @@ -13755,7 +13755,7 @@ Dbdict::execDICT_UNLOCK_ORD(Signal* signal) DictLockPtr lockPtr; c_dictLockQueue.getPtr(lockPtr, ord->lockPtr); - ndbrequire(lockPtr.p->lt->lockType == ord->lockType); + ndbrequire((Uint32) lockPtr.p->lt->lockType == ord->lockType); if (lockPtr.p->locked) { jam(); diff --git a/storage/ndb/src/kernel/blocks/dbdict/printSchemaFile.cpp b/storage/ndb/src/kernel/blocks/dbdict/printSchemaFile.cpp index d3a4e72c3f0..9c66636980a 100644 --- a/storage/ndb/src/kernel/blocks/dbdict/printSchemaFile.cpp +++ b/storage/ndb/src/kernel/blocks/dbdict/printSchemaFile.cpp @@ -68,7 +68,7 @@ print_head(const char * filename, const SchemaFile * sf) if (! 
checkonly) { ndbout << "----- Schemafile: " << filename << " -----" << endl; ndbout_c("Magic: %.*s ByteOrder: %.8x NdbVersion: %s FileSize: %d", - sizeof(sf->Magic), + (int) sizeof(sf->Magic), sf->Magic, sf->ByteOrder, version(sf->NdbVersion), diff --git a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp index 0e9157c38aa..1eee1badce3 100644 --- a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp +++ b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp @@ -2909,7 +2909,7 @@ Dbdih::nr_start_fragment(Signal* signal, } } - if (maxLcpIndex == ~0) + if (maxLcpIndex == ~ (Uint32) 0) { ndbout_c("Didnt find any LCP for node: %d tab: %d frag: %d", takeOverPtr.p->toStartingNode, @@ -5968,6 +5968,7 @@ Dbdih::sendMASTER_LCPCONF(Signal * signal){ break; default: ndbrequire(false); + lcpState= MasterLCPConf::LCP_STATUS_IDLE; // remove warning }//switch Uint32 failedNodeId = c_lcpState.m_MASTER_LCPREQ_FailedNodeId; @@ -6892,6 +6893,8 @@ void Dbdih::execDIADDTABREQ(Signal* signal) Uint32 align; }; SegmentedSectionPtr fragDataPtr; + LINT_INIT(fragDataPtr.i); + LINT_INIT(fragDataPtr.sz); signal->getSection(fragDataPtr, DiAddTabReq::FRAGMENTATION); copy((Uint32*)fragments, fragDataPtr); releaseSections(signal); @@ -6981,7 +6984,9 @@ Dbdih::sendAddFragreq(Signal* signal, ConnectRecordPtr connectPtr, TabRecordPtr tabPtr, Uint32 fragId){ jam(); const Uint32 fragCount = tabPtr.p->totalfragments; - ReplicaRecordPtr replicaPtr; replicaPtr.i = RNIL; + ReplicaRecordPtr replicaPtr; + LINT_INIT(replicaPtr.p); + replicaPtr.i = RNIL; FragmentstorePtr fragPtr; for(; fragId<fragCount; fragId++){ jam(); @@ -7541,7 +7546,11 @@ void Dbdih::execDI_FCOUNTREQ(Signal* signal) if(connectPtr.i == RNIL) ref->m_connectionData = RNIL; else + { + jam(); + ptrCheckGuard(connectPtr, cconnectFileSize, connectRecord); ref->m_connectionData = connectPtr.p->userpointer; + } ref->m_tableRef = tabPtr.i; ref->m_senderData = senderData; ref->m_error = DihFragCountRef::ErroneousTableState; @@ -11443,6 +11452,7 @@ Dbdih::findBestLogNode(CreateReplicaRecord* createReplica, { ConstPtr<ReplicaRecord> fblFoundReplicaPtr; ConstPtr<ReplicaRecord> fblReplicaPtr; + LINT_INIT(fblFoundReplicaPtr.p); /* --------------------------------------------------------------------- */ /* WE START WITH ZERO AS FOUND TO ENSURE THAT FIRST HIT WILL BE */ diff --git a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp index 9a7803efbec..53d7d98ae84 100644 --- a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp +++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp @@ -3417,9 +3417,9 @@ void Dblqh::execLQHKEYREQ(Signal* signal) } else { - regTcPtr->operation = op == ZREAD_EX ? ZREAD : op; + regTcPtr->operation = (Operation_t) op == ZREAD_EX ? ZREAD : (Operation_t) op; regTcPtr->lockType = - op == ZREAD_EX ? ZUPDATE : op == ZWRITE ? ZINSERT : op; + op == ZREAD_EX ? ZUPDATE : (Operation_t) op == ZWRITE ? 
ZINSERT : (Operation_t) op; } CRASH_INSERTION2(5041, regTcPtr->simpleRead && @@ -18520,7 +18520,7 @@ Dblqh::execDUMP_STATE_ORD(Signal* signal) do { ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord); - ndbout_c(" file %d(%d) FileChangeState: %d logFileStatus: %d currentMbyte: %d currentFilepage", + ndbout_c(" file %d(%d) FileChangeState: %d logFileStatus: %d currentMbyte: %d currentFilepage %d", logFilePtr.p->fileNo, logFilePtr.i, logFilePtr.p->fileChangeState, diff --git a/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp b/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp index 3fdd587afa5..af2925fa738 100644 --- a/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp +++ b/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp @@ -3194,7 +3194,7 @@ void Dbtc::sendlqhkeyreq(Signal* signal, if (unlikely(version < NDBD_ROWID_VERSION)) { Uint32 op = regTcPtr->operation; - Uint32 lock = op == ZREAD_EX ? ZUPDATE : op == ZWRITE ? ZINSERT : op; + Uint32 lock = (Operation_t) op == ZREAD_EX ? ZUPDATE : (Operation_t) op == ZWRITE ? ZINSERT : (Operation_t) op; LqhKeyReq::setLockType(Tdata10, lock); } /* ---------------------------------------------------------------------- */ diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp index fc3419e694a..23edd212991 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp @@ -43,7 +43,7 @@ void Dbtup::execTUP_DEALLOCREQ(Signal* signal) getFragmentrec(regFragPtr, frag_id, regTabPtr.p); ndbassert(regFragPtr.p != NULL); - if (! (((frag_page_id << MAX_TUPLES_BITS) + page_index) == ~0)) + if (! (((frag_page_id << MAX_TUPLES_BITS) + page_index) == ~ (Uint32) 0)) { Local_key tmp; tmp.m_page_no= getRealpid(regFragPtr.p, frag_page_id); diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupDiskAlloc.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupDiskAlloc.cpp index a055b18888b..7959606b7f4 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupDiskAlloc.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupDiskAlloc.cpp @@ -82,7 +82,7 @@ Dbtup::dump_disk_alloc(Dbtup::Disk_alloc_info & alloc) { ndbout << ptr << " "; } - ndbout_c(""); + ndbout_c(" "); } ndbout_c("page requests"); for(Uint32 i = 0; i<MAX_FREE_LIST; i++) @@ -95,7 +95,7 @@ Dbtup::dump_disk_alloc(Dbtup::Disk_alloc_info & alloc) { ndbout << ptr << " "; } - ndbout_c(""); + ndbout_c(" "); } ndbout_c("Extent matrix"); @@ -108,7 +108,7 @@ Dbtup::dump_disk_alloc(Dbtup::Disk_alloc_info & alloc) { ndbout << ptr << " "; } - ndbout_c(""); + ndbout_c(" "); } if (alloc.m_curr_extent_info_ptr_i != RNIL) diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp index d9710cc2549..d24483b8f1d 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp @@ -684,7 +684,7 @@ void Dbtup::execTUPKEYREQ(Signal* signal) copyAttrinfo(regOperPtr, &cinBuffer[0]); Uint32 localkey = (pageid << MAX_TUPLES_BITS) + pageidx; - if(Roptype == ZINSERT && localkey == ~0) + if (Roptype == ZINSERT && localkey == ~ (Uint32) 0) { // No tuple allocatated yet goto do_insert; diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupFixAlloc.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupFixAlloc.cpp index 1ec7994dce7..1333e7be2de 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupFixAlloc.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupFixAlloc.cpp @@ -284,4 +284,5 @@ 
Dbtup::alloc_fix_rowid(Fragrecord* regFragPtr, case ZEMPTY_MM: ndbrequire(false); } + return 0; /* purify: deadcode */ } diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp index 677eff53559..5d4115c1d2d 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp @@ -1066,6 +1066,7 @@ Dbtup::updateVarSizeNotNULL(Uint32* in_buffer, terrorCode= ZAI_INCONSISTENCY_ERROR; return false; } + return false; } bool @@ -1485,6 +1486,7 @@ Dbtup::updateDiskVarSizeNotNULL(Uint32* in_buffer, terrorCode= ZAI_INCONSISTENCY_ERROR; return false; } + return false; } bool diff --git a/storage/ndb/src/kernel/blocks/dbtux/DbtuxSearch.cpp b/storage/ndb/src/kernel/blocks/dbtux/DbtuxSearch.cpp index 4b5c0b791f9..da2321bdf6f 100644 --- a/storage/ndb/src/kernel/blocks/dbtux/DbtuxSearch.cpp +++ b/storage/ndb/src/kernel/blocks/dbtux/DbtuxSearch.cpp @@ -132,7 +132,7 @@ Dbtux::searchToAdd(Frag& frag, ConstData searchKey, TreeEnt searchEnt, TreePos& treePos.m_pos = hi; return true; } - if (hi < currNode.getOccup()) { + if ((uint) hi < currNode.getOccup()) { jam(); treePos.m_pos = hi; return true; diff --git a/storage/ndb/src/kernel/blocks/lgman.cpp b/storage/ndb/src/kernel/blocks/lgman.cpp index d4cc0cb89e8..dff41ab8bca 100644 --- a/storage/ndb/src/kernel/blocks/lgman.cpp +++ b/storage/ndb/src/kernel/blocks/lgman.cpp @@ -1809,11 +1809,11 @@ Lgman::execLCP_FRAG_ORD(Signal* signal) if(0) ndbout_c - ("execLCP_FRAG_ORD (%d %d) (%d %d) (%d %d) free pages: %d", + ("execLCP_FRAG_ORD (%d %d) (%d %d) (%d %d) free pages: %ld", ptr.p->m_tail_pos[0].m_ptr_i, ptr.p->m_tail_pos[0].m_idx, ptr.p->m_tail_pos[1].m_ptr_i, ptr.p->m_tail_pos[1].m_idx, ptr.p->m_tail_pos[2].m_ptr_i, ptr.p->m_tail_pos[2].m_idx, - (ptr.p->m_free_file_words / File_formats::UNDO_PAGE_WORDS)); + (long) (ptr.p->m_free_file_words / File_formats::UNDO_PAGE_WORDS)); } m_logfile_group_list.next(ptr); } diff --git a/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp b/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp index 84e3279afaf..65d233a1a9d 100644 --- a/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp +++ b/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp @@ -1565,6 +1565,11 @@ void Ndbcntr::execNODE_FAILREP(Signal* signal) break; } + case StopRecord::SR_BLOCK_GCP_START_GCP: + case StopRecord::SR_WAIT_COMPLETE_GCP: + case StopRecord::SR_UNBLOCK_GCP_START_GCP: + case StopRecord::SR_CLUSTER_SHUTDOWN: + break; } } @@ -2326,7 +2331,7 @@ Ndbcntr::StopRecord::checkNodeFail(Signal* signal){ bool allNodesStopped = true; int i ; - for( i = 0; i< NdbNodeBitmask::Size; i++ ){ + for( i = 0; i < (int) NdbNodeBitmask::Size; i++ ){ if ( stopReq.nodes[i] != 0 ){ allNodesStopped = false; break; diff --git a/storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp b/storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp index 362a462b081..b20f810d029 100644 --- a/storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp +++ b/storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp @@ -655,7 +655,7 @@ Ndbfs::createAsyncFile(){ // Print info about all open files for (unsigned i = 0; i < theFiles.size(); i++){ AsyncFile* file = theFiles[i]; - ndbout_c("%2d (0x%x): %s", i, file, file->isOpen()?"OPEN":"CLOSED"); + ndbout_c("%2d (0x%lx): %s", i, (long) file, file->isOpen()?"OPEN":"CLOSED"); } ERROR_SET(fatal, NDBD_EXIT_AFS_MAXOPEN,""," Ndbfs::createAsyncFile"); } @@ -1130,7 +1130,7 @@ Ndbfs::execDUMP_STATE_ORD(Signal* signal) ndbout << "All files: " << endl; for (unsigned i = 
0; i < theFiles.size(); i++){ AsyncFile* file = theFiles[i]; - ndbout_c("%2d (0x%x): %s", i,file, file->isOpen()?"OPEN":"CLOSED"); + ndbout_c("%2d (0x%lx): %s", i, (long) file, file->isOpen()?"OPEN":"CLOSED"); } } }//Ndbfs::execDUMP_STATE_ORD() diff --git a/storage/ndb/src/kernel/blocks/pgman.cpp b/storage/ndb/src/kernel/blocks/pgman.cpp index 15f056f70a9..88ea0122268 100644 --- a/storage/ndb/src/kernel/blocks/pgman.cpp +++ b/storage/ndb/src/kernel/blocks/pgman.cpp @@ -1188,7 +1188,7 @@ Pgman::process_lcp(Signal* signal) pl_hash.next(m_lcp_curr_bucket, iter); Uint32 loop = 0; while (iter.curr.i != RNIL && - m_lcp_outstanding < max_count && + m_lcp_outstanding < (Uint32) max_count && (loop ++ < 32 || iter.bucket == m_lcp_curr_bucket)) { Ptr<Page_entry>& ptr = iter.curr; @@ -2324,7 +2324,7 @@ Pgman::execDUMP_STATE_ORD(Signal* signal) if (signal->theData[0] == 11004) { - ndbout << "Dump LCP bucket m_lcp_outstanding: %d", m_lcp_outstanding; + ndbout << "Dump LCP bucket m_lcp_outstanding: " << m_lcp_outstanding; if (m_lcp_curr_bucket != ~(Uint32)0) { Page_hashlist::Iterator iter; diff --git a/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp b/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp index 23152af2775..7be0943be5d 100644 --- a/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp +++ b/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp @@ -160,7 +160,7 @@ void Qmgr::execCONTINUEB(Signal* signal) BaseString tmp; tmp.append("Shutting down node as total restart time exceeds " " StartFailureTimeout as set in config file "); - if(c_restartFailureTimeout == ~0) + if(c_restartFailureTimeout == (Uint32) ~0) tmp.append(" 0 (inifinite)"); else tmp.appfmt(" %d", c_restartFailureTimeout); @@ -1339,7 +1339,7 @@ Qmgr::check_startup(Signal* signal) if (now < partial_timeout) { jam(); - signal->theData[1] = c_restartPartialTimeout == ~0 ? 2 : 3; + signal->theData[1] = c_restartPartialTimeout == (Uint32) ~0 ? 2 : 3; signal->theData[2] = Uint32((partial_timeout - now + 500) / 1000); report_mask.assign(wait); retVal = 0; @@ -1356,7 +1356,7 @@ Qmgr::check_startup(Signal* signal) case CheckNodeGroups::Partitioning: if (now < partitioned_timeout && result != CheckNodeGroups::Win) { - signal->theData[1] = c_restartPartionedTimeout == ~0 ? 4 : 5; + signal->theData[1] = c_restartPartionedTimeout == (Uint32) ~0 ? 
4 : 5; signal->theData[2] = Uint32((partitioned_timeout - now + 500) / 1000); report_mask.assign(c_definedNodes); report_mask.bitANDC(c_start.m_starting_nodes); @@ -1403,6 +1403,7 @@ missing_nodegroup: " starting: %s (missing fs for: %s)", mask1, mask2); progError(__LINE__, NDBD_EXIT_SR_RESTARTCONFLICT, buf); + return 0; // Deadcode } void diff --git a/storage/ndb/src/kernel/blocks/restore.cpp b/storage/ndb/src/kernel/blocks/restore.cpp index b80bc88ec5b..0436347eeca 100644 --- a/storage/ndb/src/kernel/blocks/restore.cpp +++ b/storage/ndb/src/kernel/blocks/restore.cpp @@ -1137,7 +1137,7 @@ Restore::reorder_key(const KeyDescriptor* desc, } dst += sz; } - ndbassert((dst - Tmp) == len); + ndbassert((Uint32) (dst - Tmp) == len); memcpy(data, Tmp, 4*len); } diff --git a/storage/ndb/src/kernel/blocks/suma/Suma.cpp b/storage/ndb/src/kernel/blocks/suma/Suma.cpp index 4b38ac0f5ff..92efca36a35 100644 --- a/storage/ndb/src/kernel/blocks/suma/Suma.cpp +++ b/storage/ndb/src/kernel/blocks/suma/Suma.cpp @@ -1590,6 +1590,9 @@ Suma::execGET_TABINFOREF(Signal* signal){ break; case GetTabInfoRef::TableNameTooLong: ndbrequire(false); + break; + case GetTabInfoRef::NoFetchByName: + break; } if (do_resend_request) { @@ -4306,7 +4309,7 @@ Suma::Restart::sendSubStartReq(SubscriptionPtr subPtr, SubscriberPtr subbPtr, // restarting suma will not respond to this until startphase 5 // since it is not until then data copying has been completed - DBUG_PRINT("info",("Restarting subscriber: %u on key: [%u,%u]", + DBUG_PRINT("info",("Restarting subscriber: %u on key: [%u,%u] %u", subbPtr.i, subPtr.p->m_subscriptionId, subPtr.p->m_subscriptionKey, diff --git a/storage/ndb/src/kernel/error/ErrorReporter.cpp b/storage/ndb/src/kernel/error/ErrorReporter.cpp index 6c8bb1fe615..e95cd5c132f 100644 --- a/storage/ndb/src/kernel/error/ErrorReporter.cpp +++ b/storage/ndb/src/kernel/error/ErrorReporter.cpp @@ -185,6 +185,7 @@ ErrorReporter::handleAssert(const char* message, const char* file, int line, int childReportError(ec); NdbShutdown(s_errorHandlerShutdownType); + exit(1); // Deadcode } void diff --git a/storage/ndb/src/kernel/error/ErrorReporter.hpp b/storage/ndb/src/kernel/error/ErrorReporter.hpp index 0ec84190238..dffec14dff2 100644 --- a/storage/ndb/src/kernel/error/ErrorReporter.hpp +++ b/storage/ndb/src/kernel/error/ErrorReporter.hpp @@ -29,7 +29,7 @@ public: static void setErrorHandlerShutdownType(NdbShutdownType nst = NST_ErrorHandler); static void handleAssert(const char* message, const char* file, - int line, int ec = NDBD_EXIT_PRGERR); + int line, int ec = NDBD_EXIT_PRGERR) __attribute__((__noreturn__)); static void handleError(int faultID, const char* problemData, diff --git a/storage/ndb/src/kernel/error/ndbd_exit_codes.c b/storage/ndb/src/kernel/error/ndbd_exit_codes.c index 8ed3b3a5cbb..f6672c77fc9 100644 --- a/storage/ndb/src/kernel/error/ndbd_exit_codes.c +++ b/storage/ndb/src/kernel/error/ndbd_exit_codes.c @@ -14,6 +14,7 @@ along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#include <ndb_global.h> #include <ndbd_exit_codes.h> typedef struct ErrStruct { diff --git a/storage/ndb/src/kernel/vm/Configuration.cpp b/storage/ndb/src/kernel/vm/Configuration.cpp index 81b87c818fb..48868309d25 100644 --- a/storage/ndb/src/kernel/vm/Configuration.cpp +++ b/storage/ndb/src/kernel/vm/Configuration.cpp @@ -191,7 +191,7 @@ Configuration::init(int argc, char** argv) } if (! 
(val > 0 && val < MAX_NDB_NODES)) { - ndbout_c("Invalid nodeid specified in nowait-nodes: %d : %s", + ndbout_c("Invalid nodeid specified in nowait-nodes: %ld : %s", val, _nowait_nodes); exit(-1); } diff --git a/storage/ndb/src/kernel/vm/DLHashTable.hpp b/storage/ndb/src/kernel/vm/DLHashTable.hpp index 4f580f937b7..7469dda7917 100644 --- a/storage/ndb/src/kernel/vm/DLHashTable.hpp +++ b/storage/ndb/src/kernel/vm/DLHashTable.hpp @@ -287,6 +287,7 @@ DLHashTableImpl<P, T, U>::remove(Ptr<T> & ptr, const T & key) Uint32 i; T * p; Ptr<T> prev; + LINT_INIT(prev.p); prev.i = RNIL; i = hashValues[hv]; diff --git a/storage/ndb/src/kernel/vm/RWPool.hpp b/storage/ndb/src/kernel/vm/RWPool.hpp index c1f4abeed79..a4ad12b52cf 100644 --- a/storage/ndb/src/kernel/vm/RWPool.hpp +++ b/storage/ndb/src/kernel/vm/RWPool.hpp @@ -70,6 +70,7 @@ RWPool::getPtr(Uint32 i) return record; } handle_invalid_get_ptr(i); + return 0; /* purify: deadcode */ } #endif diff --git a/storage/ndb/src/kernel/vm/Rope.cpp b/storage/ndb/src/kernel/vm/Rope.cpp index 0c90d8f65d5..afe08e063a9 100644 --- a/storage/ndb/src/kernel/vm/Rope.cpp +++ b/storage/ndb/src/kernel/vm/Rope.cpp @@ -30,8 +30,8 @@ ConstRope::copy(char* buf) const { int ConstRope::compare(const char * str, size_t len) const { if(DEBUG_ROPE) - ndbout_c("ConstRope[ %d 0x%x 0x%x ]::compare(%s, %d)", - head.used, head.firstItem, head.lastItem, str, len); + ndbout_c("ConstRope[ %d 0x%x 0x%x ]::compare(%s, %d)", + head.used, head.firstItem, head.lastItem, str, (int) len); Uint32 left = head.used > len ? len : head.used; Ptr<Segment> curr; curr.i = head.firstItem; @@ -60,7 +60,7 @@ ConstRope::compare(const char * str, size_t len) const { } } if(DEBUG_ROPE) - ndbout_c("ConstRope::compare(%s, %d) -> %d", str, len, head.used > len); + ndbout_c("ConstRope::compare(%s, %d) -> %d", str, (int) len, head.used > len); return head.used > len; } @@ -91,7 +91,7 @@ Rope::copy(char* buf) const { int Rope::compare(const char * str, size_t len) const { if(DEBUG_ROPE) - ndbout_c("Rope::compare(%s, %d)", str, len); + ndbout_c("Rope::compare(%s, %d)", str, (int) len); Uint32 left = head.used > len ? 
len : head.used; Ptr<Segment> curr; curr.i = head.firstItem; @@ -100,7 +100,7 @@ Rope::compare(const char * str, size_t len) const { int res = memcmp(str, (const char*)curr.p->data, 4 * getSegmentSize()); if(res != 0){ if(DEBUG_ROPE) - ndbout_c("Rope::compare(%s, %d, %s) -> %d", str, len, + ndbout_c("Rope::compare(%s, %d, %s) -> %d", str, (int) len, (const char*)curr.p->data, res); return res; } @@ -115,19 +115,19 @@ Rope::compare(const char * str, size_t len) const { int res = memcmp(str, (const char*)curr.p->data, left); if(res){ if(DEBUG_ROPE) - ndbout_c("Rope::compare(%s, %d) -> %d", str, len, res); + ndbout_c("Rope::compare(%s, %d) -> %d", str, (int) len, res); return res; } } if(DEBUG_ROPE) - ndbout_c("Rope::compare(%s, %d) -> %d", str, len, head.used > len); + ndbout_c("Rope::compare(%s, %d) -> %d", str, (int) len, head.used > len); return head.used > len; } bool Rope::assign(const char * s, size_t len, Uint32 hash){ if(DEBUG_ROPE) - ndbout_c("Rope::assign(%s, %d, 0x%x)", s, len, hash); + ndbout_c("Rope::assign(%s, %d, 0x%x)", s, (int) len, hash); m_hash = hash; head.used = (head.used + 3) / 4; release(); diff --git a/storage/ndb/src/kernel/vm/SimulatedBlock.cpp b/storage/ndb/src/kernel/vm/SimulatedBlock.cpp index 4e01038d343..1d6676287e8 100644 --- a/storage/ndb/src/kernel/vm/SimulatedBlock.cpp +++ b/storage/ndb/src/kernel/vm/SimulatedBlock.cpp @@ -1930,6 +1930,7 @@ SimulatedBlock::xfrm_attr(Uint32 attrDesc, CHARSET_INFO* cs, { jam(); Uint32 len; + LINT_INIT(len); switch(array){ case NDB_ARRAYTYPE_SHORT_VAR: len = 1 + srcPtr[0]; diff --git a/storage/ndb/src/kernel/vm/TransporterCallback.cpp b/storage/ndb/src/kernel/vm/TransporterCallback.cpp index f315918b871..badd2af669c 100644 --- a/storage/ndb/src/kernel/vm/TransporterCallback.cpp +++ b/storage/ndb/src/kernel/vm/TransporterCallback.cpp @@ -56,7 +56,7 @@ const char *lookupConnectionError(Uint32 err) { int i= 0; while ((Uint32)connectionError[i].err != err && - (Uint32)connectionError[i].err != -1) + connectionError[i].err != -1) i++; return connectionError[i].text; } diff --git a/storage/ndb/src/kernel/vm/WOPool.hpp b/storage/ndb/src/kernel/vm/WOPool.hpp index 6b42218368c..ed0d09d2f04 100644 --- a/storage/ndb/src/kernel/vm/WOPool.hpp +++ b/storage/ndb/src/kernel/vm/WOPool.hpp @@ -115,6 +115,7 @@ WOPool::getPtr(Uint32 i) return record; } handle_invalid_get_ptr(i); + return 0; /* purify: deadcode */ } #endif diff --git a/storage/ndb/src/kernel/vm/ndbd_malloc_impl.cpp b/storage/ndb/src/kernel/vm/ndbd_malloc_impl.cpp index 7b8795f7ecb..4de8f8ee479 100644 --- a/storage/ndb/src/kernel/vm/ndbd_malloc_impl.cpp +++ b/storage/ndb/src/kernel/vm/ndbd_malloc_impl.cpp @@ -223,6 +223,10 @@ Ndbd_mem_manager::init(bool alloc_less_memory) InitChunk chunk; Uint32 remaining = pages - allocated; +#if defined(_lint) || defined(FORCE_INIT_OF_VARS) + memset((char*) &chunk, 0 , sizeof(chunk)); +#endif + if (do_malloc(pages - allocated, &chunk)) { Uint32 i = 0; diff --git a/storage/ndb/src/mgmapi/mgmapi.cpp b/storage/ndb/src/mgmapi/mgmapi.cpp index 7c5fafd2286..b64b24aa3cf 100644 --- a/storage/ndb/src/mgmapi/mgmapi.cpp +++ b/storage/ndb/src/mgmapi/mgmapi.cpp @@ -184,7 +184,7 @@ ndb_mgm_create_handle() h->mgmd_version_minor= -1; h->mgmd_version_build= -1; - DBUG_PRINT("info", ("handle=0x%x", (UintPtr)h)); + DBUG_PRINT("info", ("handle: 0x%lx", (ulong) h)); DBUG_RETURN(h); } @@ -201,7 +201,7 @@ int ndb_mgm_set_connectstring(NdbMgmHandle handle, const char * mgmsrv) { DBUG_ENTER("ndb_mgm_set_connectstring"); - DBUG_PRINT("info", ("handle=0x%x", 
(UintPtr)handle)); + DBUG_PRINT("info", ("handle: 0x%lx", (ulong) handle)); handle->cfg.~LocalConfig(); new (&(handle->cfg)) LocalConfig; if (!handle->cfg.init(mgmsrv, 0) || @@ -243,7 +243,7 @@ ndb_mgm_destroy_handle(NdbMgmHandle * handle) DBUG_ENTER("ndb_mgm_destroy_handle"); if(!handle) DBUG_VOID_RETURN; - DBUG_PRINT("info", ("handle=0x%x", (UintPtr)(* handle))); + DBUG_PRINT("info", ("handle: 0x%lx", (ulong) (* handle))); /** * important! only disconnect if connected * other code relies on this @@ -2544,8 +2544,8 @@ int ndb_mgm_report_event(NdbMgmHandle handle, Uint32 *data, Uint32 length) args.put("length", length); BaseString data_string; - for (int i = 0; i < length; i++) - data_string.appfmt(" %u", data[i]); + for (int i = 0; i < (int) length; i++) + data_string.appfmt(" %lu", (ulong) data[i]); args.put("data", data_string.c_str()); diff --git a/storage/ndb/src/mgmclient/CommandInterpreter.cpp b/storage/ndb/src/mgmclient/CommandInterpreter.cpp index 8bff874a97a..debf5343a90 100644 --- a/storage/ndb/src/mgmclient/CommandInterpreter.cpp +++ b/storage/ndb/src/mgmclient/CommandInterpreter.cpp @@ -598,7 +598,7 @@ static const char* helpTextDebug = ; #endif -struct { +struct st_cmd_help { const char *cmd; const char * help; }help_items[]={ @@ -1558,6 +1558,8 @@ CommandInterpreter::executeShow(char* parameters) case NDB_MGM_NODE_TYPE_UNKNOWN: ndbout << "Error: Unknown Node Type" << endl; return -1; + case NDB_MGM_NODE_TYPE_MAX: + break; /* purify: deadcode */ } } @@ -2393,7 +2395,7 @@ CommandInterpreter::executeEventReporting(int processId, Vector<BaseString> specs; tmp.split(specs, " "); - for (int i=0; i < specs.size(); i++) + for (int i=0; i < (int) specs.size(); i++) { Vector<BaseString> spec; specs[i].split(spec, "="); diff --git a/storage/ndb/src/mgmsrv/MgmtSrvr.cpp b/storage/ndb/src/mgmsrv/MgmtSrvr.cpp index 222b71dbaac..58369141ba3 100644 --- a/storage/ndb/src/mgmsrv/MgmtSrvr.cpp +++ b/storage/ndb/src/mgmsrv/MgmtSrvr.cpp @@ -830,7 +830,7 @@ MgmtSrvr::sendVersionReq(int v_nodeId, Uint32 &version, const char **address) case GSN_API_VERSION_CONF: { const ApiVersionConf * const conf = CAST_CONSTPTR(ApiVersionConf, signal->getDataPtr()); - assert(conf->nodeId == v_nodeId); + assert((int) conf->nodeId == v_nodeId); version = conf->version; struct in_addr in; in.s_addr= conf->inet_addr; @@ -1568,7 +1568,7 @@ MgmtSrvr::setEventReportingLevelImpl(int nodeId, NodeBitmask nodes; nodes.clear(); Uint32 max = (nodeId == 0) ? 
(nodeId = 1, MAX_NDB_NODES) : nodeId; - for(; nodeId <= max; nodeId++) + for(; (Uint32) nodeId <= max; nodeId++) { if (nodeTypes[nodeId] != NODE_TYPE_DB) continue; @@ -2075,8 +2075,8 @@ MgmtSrvr::alloc_node_id(NodeId * nodeId, int log_event) { DBUG_ENTER("MgmtSrvr::alloc_node_id"); - DBUG_PRINT("enter", ("nodeid=%d, type=%d, client_addr=%d", - *nodeId, type, client_addr)); + DBUG_PRINT("enter", ("nodeid: %d type: %d client_addr: 0x%ld", + *nodeId, type, (long) client_addr)); if (g_no_nodeid_checks) { if (*nodeId == 0) { error_string.appfmt("no-nodeid-checks set in management server.\n" @@ -2495,7 +2495,7 @@ MgmtSrvr::startBackup(Uint32& backupId, int waitCompleted) const BackupCompleteRep * const rep = CAST_CONSTPTR(BackupCompleteRep, signal->getDataPtr()); #ifdef VM_TRACE - ndbout_c("Backup(%d) completed %d", rep->backupId); + ndbout_c("Backup(%d) completed", rep->backupId); #endif event.Event = BackupEvent::BackupCompleted; event.Completed.BackupId = rep->backupId; @@ -2751,7 +2751,7 @@ MgmtSrvr::setDbParameter(int node, int param, const char * value, break; case 1: res = i2.set(param, val_64); - ndbout_c("Updating node %d param: %d to %Ld", node, param, val_32); + ndbout_c("Updating node %d param: %d to %u", node, param, val_32); break; case 2: res = i2.set(param, val_char); diff --git a/storage/ndb/src/mgmsrv/Services.cpp b/storage/ndb/src/mgmsrv/Services.cpp index 80dd040eb1b..d3272fc9de2 100644 --- a/storage/ndb/src/mgmsrv/Services.cpp +++ b/storage/ndb/src/mgmsrv/Services.cpp @@ -1716,7 +1716,7 @@ MgmApiSession::report_event(Parser_t::Context &ctx, BaseString tmp(data_string); Vector<BaseString> item; tmp.split(item, " "); - for (int i = 0; i < length ; i++) + for (int i = 0; (Uint32) i < length ; i++) { sscanf(item[i].c_str(), "%u", data+i); } diff --git a/storage/ndb/src/ndbapi/ClusterMgr.cpp b/storage/ndb/src/ndbapi/ClusterMgr.cpp index b171457c2a9..4a865a0eb14 100644 --- a/storage/ndb/src/ndbapi/ClusterMgr.cpp +++ b/storage/ndb/src/ndbapi/ClusterMgr.cpp @@ -203,7 +203,7 @@ ClusterMgr::forceHB() int nodeId= 0; for(int i=0; - NodeBitmask::NotFound!=(nodeId= waitForHBFromNodes.find(i)); + (int) NodeBitmask::NotFound != (nodeId= waitForHBFromNodes.find(i)); i= nodeId+1) { #ifdef DEBUG_REG diff --git a/storage/ndb/src/ndbapi/DictCache.cpp b/storage/ndb/src/ndbapi/DictCache.cpp index c06bb6fc62a..aa42c1a1bab 100644 --- a/storage/ndb/src/ndbapi/DictCache.cpp +++ b/storage/ndb/src/ndbapi/DictCache.cpp @@ -129,7 +129,8 @@ void GlobalDictCache::printCache() NdbElement_t<Vector<TableVersion> > * curr = m_tableHash.getNext(0); while(curr != 0){ DBUG_PRINT("curr", ("len: %d, hash: %d, lk: %d, str: %s", - curr->len, curr->hash, curr->localkey1, curr->str)); + curr->len, curr->hash, curr->localkey1, + (char*) curr->str)); if (curr->theData){ Vector<TableVersion> * vers = curr->theData; const unsigned sz = vers->size(); @@ -416,7 +417,7 @@ GlobalDictCache::alter_table_rep(const char * name, { TableVersion & ver = (* vers)[i]; if(ver.m_version == tableVersion && ver.m_impl && - ver.m_impl->m_id == tableId) + (Uint32) ver.m_impl->m_id == tableId) { ver.m_status = DROPPED; ver.m_impl->m_status = altered ? diff --git a/storage/ndb/src/ndbapi/Ndb.cpp b/storage/ndb/src/ndbapi/Ndb.cpp index 15ef596deef..ca5fd07d724 100644 --- a/storage/ndb/src/ndbapi/Ndb.cpp +++ b/storage/ndb/src/ndbapi/Ndb.cpp @@ -342,8 +342,9 @@ Ndb::startTransaction(const NdbDictionary::Table *table, { NdbTransaction *trans= startTransactionLocal(0, nodeId); - DBUG_PRINT("exit",("start trans: 0x%x transid: 0x%llx", - trans, trans ? 
trans->getTransactionId() : 0)); + DBUG_PRINT("exit",("start trans: 0x%lx transid: 0x%lx", + (long) trans, + (long) (trans ? trans->getTransactionId() : 0))); DBUG_RETURN(trans); } } else { @@ -364,7 +365,7 @@ Ndb::hupp(NdbTransaction* pBuddyTrans) { DBUG_ENTER("Ndb::hupp"); - DBUG_PRINT("enter", ("trans: 0x%x",pBuddyTrans)); + DBUG_PRINT("enter", ("trans: 0x%lx", (long) pBuddyTrans)); Uint32 aPriority = 0; if (pBuddyTrans == NULL){ @@ -389,8 +390,9 @@ Ndb::hupp(NdbTransaction* pBuddyTrans) } pCon->setTransactionId(pBuddyTrans->getTransactionId()); pCon->setBuddyConPtr((Uint32)pBuddyTrans->getTC_ConnectPtr()); - DBUG_PRINT("exit", ("hupp trans: 0x%x transid: 0x%llx", - pCon, pCon ? pCon->getTransactionId() : 0)); + DBUG_PRINT("exit", ("hupp trans: 0x%lx transid: 0x%lx", + (long) pCon, + (long) (pCon ? pCon->getTransactionId() : 0))); DBUG_RETURN(pCon); } else { DBUG_RETURN(NULL); @@ -477,8 +479,9 @@ Ndb::closeTransaction(NdbTransaction* aConnection) tCon = theTransactionList; theRemainingStartTransactions++; - DBUG_PRINT("info",("close trans: 0x%x transid: 0x%llx", - aConnection, aConnection->getTransactionId())); + DBUG_PRINT("info",("close trans: 0x%lx transid: 0x%lx", + (long) aConnection, + (long) aConnection->getTransactionId())); DBUG_PRINT("info",("magic number: 0x%x TCConPtr: 0x%x theMyRef: 0x%x 0x%x", aConnection->theMagicNumber, aConnection->theTCConPtr, aConnection->theMyRef, getReference())); @@ -765,7 +768,7 @@ Ndb::getAutoIncrementValue(const char* aTableName, TupleIdRange & range = info->m_tuple_id_range; if (getTupleIdFromNdb(table, range, tupleId, cacheSize) == -1) DBUG_RETURN(-1); - DBUG_PRINT("info", ("value %llu", (ulonglong)tupleId)); + DBUG_PRINT("info", ("value %lu", (ulong) tupleId)); DBUG_RETURN(0); } @@ -788,7 +791,7 @@ Ndb::getAutoIncrementValue(const NdbDictionary::Table * aTable, TupleIdRange & range = info->m_tuple_id_range; if (getTupleIdFromNdb(table, range, tupleId, cacheSize) == -1) DBUG_RETURN(-1); - DBUG_PRINT("info", ("value %llu", (ulonglong)tupleId)); + DBUG_PRINT("info", ("value %lu", (ulong)tupleId)); DBUG_RETURN(0); } @@ -803,7 +806,7 @@ Ndb::getAutoIncrementValue(const NdbDictionary::Table * aTable, if (getTupleIdFromNdb(table, range, tupleId, cacheSize) == -1) DBUG_RETURN(-1); - DBUG_PRINT("info", ("value %llu", (ulonglong)tupleId)); + DBUG_PRINT("info", ("value %lu", (ulong)tupleId)); DBUG_RETURN(0); } @@ -816,7 +819,7 @@ Ndb::getTupleIdFromNdb(const NdbTableImpl* table, { assert(range.m_first_tuple_id < range.m_last_tuple_id); tupleId = ++range.m_first_tuple_id; - DBUG_PRINT("info", ("next cached value %llu", (ulonglong)tupleId)); + DBUG_PRINT("info", ("next cached value %lu", (ulong)tupleId)); } else { @@ -853,7 +856,7 @@ Ndb::readAutoIncrementValue(const char* aTableName, TupleIdRange & range = info->m_tuple_id_range; if (readTupleIdFromNdb(table, range, tupleId) == -1) DBUG_RETURN(-1); - DBUG_PRINT("info", ("value %llu", (ulonglong)tupleId)); + DBUG_PRINT("info", ("value %lu", (ulong)tupleId)); DBUG_RETURN(0); } @@ -876,7 +879,7 @@ Ndb::readAutoIncrementValue(const NdbDictionary::Table * aTable, TupleIdRange & range = info->m_tuple_id_range; if (readTupleIdFromNdb(table, range, tupleId) == -1) DBUG_RETURN(-1); - DBUG_PRINT("info", ("value %llu", (ulonglong)tupleId)); + DBUG_PRINT("info", ("value %lu", (ulong)tupleId)); DBUG_RETURN(0); } @@ -890,7 +893,7 @@ Ndb::readAutoIncrementValue(const NdbDictionary::Table * aTable, if (readTupleIdFromNdb(table, range, tupleId) == -1) DBUG_RETURN(-1); - DBUG_PRINT("info", ("value %llu", 
(ulonglong)tupleId)); + DBUG_PRINT("info", ("value %lu", (ulong)tupleId)); DBUG_RETURN(0); } @@ -991,8 +994,8 @@ Ndb::setTupleIdInNdb(const NdbTableImpl* table, { range.m_first_tuple_id = tupleId - 1; DBUG_PRINT("info", - ("Setting next auto increment cached value to %llu", - (ulonglong)tupleId)); + ("Setting next auto increment cached value to %lu", + (ulong)tupleId)); DBUG_RETURN(0); } } @@ -1046,7 +1049,8 @@ Ndb::opTupleIdOnNdb(const NdbTableImpl* table, { DBUG_ENTER("Ndb::opTupleIdOnNdb"); Uint32 aTableId = table->m_id; - DBUG_PRINT("enter", ("table=%u value=%llu op=%u", aTableId, opValue, op)); + DBUG_PRINT("enter", ("table: %u value: %lu op: %u", + aTableId, (ulong) opValue, op)); NdbTransaction* tConnection = NULL; NdbOperation* tOperation = NULL; @@ -1114,8 +1118,8 @@ Ndb::opTupleIdOnNdb(const NdbTableImpl* table, else { DBUG_PRINT("info", - ("Setting next auto increment value (db) to %llu", - (ulonglong)opValue)); + ("Setting next auto increment value (db) to %lu", + (ulong) opValue)); range.m_first_tuple_id = range.m_last_tuple_id = opValue - 1; } break; @@ -1241,9 +1245,9 @@ int Ndb::setDatabaseAndSchemaName(const NdbDictionary::Table* t) if (s2 && s2 != s1 + 1) { char buf[NAME_LEN + 1]; if (s1 - s0 <= NAME_LEN && s2 - (s1 + 1) <= NAME_LEN) { - sprintf(buf, "%.*s", s1 - s0, s0); + sprintf(buf, "%.*s", (int) (s1 - s0), s0); setDatabaseName(buf); - sprintf(buf, "%.*s", s2 - (s1 + 1), s1 + 1); + sprintf(buf, "%.*s", (int) (s2 - (s1 + 1)), s1 + 1); setDatabaseSchemaName(buf); return 0; } diff --git a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp index 42ef7bbbaee..dca1432d18a 100644 --- a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp +++ b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp @@ -3583,7 +3583,7 @@ NdbDictInterface::createEvent(class Ndb & ndb, evnt.mi_type = evntConf->getEventType(); evnt.setTable(dataPtr); } else { - if (evnt.m_tableImpl->m_id != evntConf->getTableId() || + if ((Uint32) evnt.m_tableImpl->m_id != evntConf->getTableId() || evnt.m_tableImpl->m_version != evntConf->getTableVersion() || //evnt.m_attrListBitmask != evntConf->getAttrListBitmask() || evnt.mi_type != evntConf->getEventType()) { @@ -3701,7 +3701,7 @@ NdbDictionaryImpl::getEvent(const char * eventName, NdbTableImpl* tab) DBUG_RETURN(NULL); } if ((tab->m_status != NdbDictionary::Object::Retrieved) || - (tab->m_id != ev->m_table_id) || + ((Uint32) tab->m_id != ev->m_table_id) || (table_version_major(tab->m_version) != table_version_major(ev->m_table_version))) { @@ -3731,7 +3731,7 @@ NdbDictionaryImpl::getEvent(const char * eventName, NdbTableImpl* tab) DBUG_PRINT("info",("Table: id: %d version: %d", table.m_id, table.m_version)); - if (table.m_id != ev->m_table_id || + if ((Uint32) table.m_id != ev->m_table_id || table_version_major(table.m_version) != table_version_major(ev->m_table_version)) { @@ -3747,7 +3747,7 @@ NdbDictionaryImpl::getEvent(const char * eventName, NdbTableImpl* tab) #endif - if ( attributeList_sz > table.getNoOfColumns() ) + if ( attributeList_sz > (uint) table.getNoOfColumns() ) { m_error.code = 241; DBUG_PRINT("error",("Invalid version, too many columns")); @@ -3757,7 +3757,7 @@ NdbDictionaryImpl::getEvent(const char * eventName, NdbTableImpl* tab) assert( (int)attributeList_sz <= table.getNoOfColumns() ); for(unsigned id= 0; ev->m_columns.size() < attributeList_sz; id++) { - if ( id >= table.getNoOfColumns()) + if ( id >= (uint) table.getNoOfColumns()) { m_error.code = 241; DBUG_PRINT("error",("Invalid version, column %d out of 
range", id)); diff --git a/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp index 08b98cf7b48..1996dec024a 100644 --- a/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp +++ b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp @@ -58,7 +58,7 @@ print_std(const SubTableData * sdata, LinearSectionPtr ptr[3]) SubTableData::getOperation(sdata->requestInfo)); for (int i = 0; i <= 2; i++) { printf("sec=%d addr=%p sz=%d\n", i, (void*)ptr[i].p, ptr[i].sz); - for (int j = 0; j < ptr[i].sz; j++) + for (int j = 0; (uint) j < ptr[i].sz; j++) printf("%08x ", ptr[i].p[j]); printf("\n"); } @@ -199,11 +199,11 @@ NdbEventOperationImpl::init(NdbEventImpl& evnt) m_mergeEvents = false; #endif m_ref_count = 0; - DBUG_PRINT("info", ("m_ref_count = 0 for op: %p", this)); + DBUG_PRINT("info", ("m_ref_count = 0 for op: 0x%lx", (long) this)); m_has_error= 0; - DBUG_PRINT("exit",("this: 0x%x oid: %u", this, m_oid)); + DBUG_PRINT("exit",("this: 0x%lx oid: %u", (long) this, m_oid)); DBUG_VOID_RETURN; } @@ -739,8 +739,8 @@ NdbEventOperationImpl::receive_event() NdbTableImpl *tmp_table_impl= m_eventImpl->m_tableImpl; m_eventImpl->m_tableImpl = at; - DBUG_PRINT("info", ("switching table impl 0x%x -> 0x%x", - tmp_table_impl, at)); + DBUG_PRINT("info", ("switching table impl 0x%lx -> 0x%lx", + (long) tmp_table_impl, (long) at)); // change the rec attrs to refer to the new table object int i; @@ -751,9 +751,9 @@ NdbEventOperationImpl::receive_event() { int no = p->getColumn()->getColumnNo(); NdbColumnImpl *tAttrInfo = at->getColumn(no); - DBUG_PRINT("info", ("rec_attr: 0x%x " - "switching column impl 0x%x -> 0x%x", - p, p->m_column, tAttrInfo)); + DBUG_PRINT("info", ("rec_attr: 0x%lx " + "switching column impl 0x%lx -> 0x%lx", + (long) p, (long) p->m_column, (long) tAttrInfo)); p->m_column = tAttrInfo; p = p->next(); } @@ -765,9 +765,9 @@ NdbEventOperationImpl::receive_event() { int no = p->getColumn()->getColumnNo(); NdbColumnImpl *tAttrInfo = at->getColumn(no); - DBUG_PRINT("info", ("rec_attr: 0x%x " - "switching column impl 0x%x -> 0x%x", - p, p->m_column, tAttrInfo)); + DBUG_PRINT("info", ("rec_attr: 0x%lx " + "switching column impl 0x%lx -> 0x%lx", + (long) p, (long) p->m_column, (long) tAttrInfo)); p->m_column = tAttrInfo; p = p->next(); } @@ -1269,8 +1269,9 @@ NdbEventBuffer::getGCIEventOperations(Uint32* iter, Uint32* event_types) EventBufData_list::Gci_op g = gci_ops->m_gci_op_list[(*iter)++]; if (event_types != NULL) *event_types = g.event_types; - DBUG_PRINT("info", ("gci: %d g.op: %x g.event_types: %x", - (unsigned)gci_ops->m_gci, g.op, g.event_types)); + DBUG_PRINT("info", ("gci: %u g.op: 0x%lx g.event_types: 0x%lx", + (unsigned)gci_ops->m_gci, (long) g.op, + (long) g.event_types)); DBUG_RETURN(g.op); } DBUG_RETURN(NULL); @@ -1507,9 +1508,9 @@ NdbEventBuffer::execSUB_GCP_COMPLETE_REP(const SubGcpCompleteRep * const rep) else { /** out of order something */ - ndbout_c("out of order bucket: %d gci: %lld m_latestGCI: %lld", - bucket-(Gci_container*)m_active_gci.getBase(), - gci, m_latestGCI); + ndbout_c("out of order bucket: %d gci: %ld m_latestGCI: %ld", + (int) (bucket-(Gci_container*)m_active_gci.getBase()), + (long) gci, (long) m_latestGCI); bucket->m_state = Gci_container::GC_COMPLETE; bucket->m_gcp_complete_rep_count = 1; // Prevent from being reused m_latest_complete_GCI = gci; @@ -1563,8 +1564,8 @@ NdbEventBuffer::complete_outof_order_gcis() #endif m_complete_data.m_data.append_list(&bucket->m_data, start_gci); #ifdef VM_TRACE - ndbout_c(" moved %lld rows -> 
%lld", bucket->m_data.m_count, - m_complete_data.m_data.m_count); + ndbout_c(" moved %ld rows -> %ld", (long) bucket->m_data.m_count, + (long) m_complete_data.m_data.m_count); #else ndbout_c(""); #endif @@ -2180,7 +2181,7 @@ NdbEventBuffer::merge_data(const SubTableData * const sdata, Ev_t* tp = 0; int i; - for (i = 0; i < sizeof(ev_t)/sizeof(ev_t[0]); i++) { + for (i = 0; (uint) i < sizeof(ev_t)/sizeof(ev_t[0]); i++) { if (ev_t[i].t1 == t1 && ev_t[i].t2 == t2) { tp = &ev_t[i]; break; diff --git a/storage/ndb/src/ndbapi/NdbIndexOperation.cpp b/storage/ndb/src/ndbapi/NdbIndexOperation.cpp index 39dbab423d3..9faf66a1e98 100644 --- a/storage/ndb/src/ndbapi/NdbIndexOperation.cpp +++ b/storage/ndb/src/ndbapi/NdbIndexOperation.cpp @@ -64,6 +64,9 @@ NdbIndexOperation::indxInit(const NdbIndexImpl * anIndex, case(NdbDictionary::Index::OrderedIndex): setErrorCodeAbort(4003); return -1; + default: + DBUG_ASSERT(0); + break; } m_theIndex = anIndex; m_accessTable = anIndex->m_table; diff --git a/storage/ndb/src/ndbapi/NdbIndexStat.cpp b/storage/ndb/src/ndbapi/NdbIndexStat.cpp index e490290b6a2..4ae00348606 100644 --- a/storage/ndb/src/ndbapi/NdbIndexStat.cpp +++ b/storage/ndb/src/ndbapi/NdbIndexStat.cpp @@ -236,7 +236,7 @@ NdbIndexStat::stat_search(const Area& a, const Uint32* key, Uint32 keylen, Uint3 int NdbIndexStat::stat_oldest(const Area& a) { - Uint32 i, k, m; + Uint32 i, k= 0, m; bool found = false; m = ~(Uint32)0; // shut up incorrect CC warning for (i = 0; i < a.m_entries; i++) { diff --git a/storage/ndb/src/ndbapi/NdbOperationDefine.cpp b/storage/ndb/src/ndbapi/NdbOperationDefine.cpp index 6915a91dd12..d14fcf60ec4 100644 --- a/storage/ndb/src/ndbapi/NdbOperationDefine.cpp +++ b/storage/ndb/src/ndbapi/NdbOperationDefine.cpp @@ -408,9 +408,9 @@ NdbOperation::setValue( const NdbColumnImpl* tAttrInfo, const char* aValuePassed) { DBUG_ENTER("NdbOperation::setValue"); - DBUG_PRINT("enter", ("col=%s op=%d val=%p", + DBUG_PRINT("enter", ("col: %s op:%d val: 0x%lx", tAttrInfo->m_name.c_str(), theOperationType, - aValuePassed)); + (long) aValuePassed)); int tReturnCode; Uint32 tAttrId; diff --git a/storage/ndb/src/ndbapi/NdbOperationExec.cpp b/storage/ndb/src/ndbapi/NdbOperationExec.cpp index 3d8a1d1b93a..38e0b441346 100644 --- a/storage/ndb/src/ndbapi/NdbOperationExec.cpp +++ b/storage/ndb/src/ndbapi/NdbOperationExec.cpp @@ -207,7 +207,7 @@ NdbOperation::prepareSend(Uint32 aTC_ConnectPtr, Uint64 aTransId) tcKeyReq->setKeyLength(tReqInfo, tTupKeyLen); // A simple read is always ignore error - abortOption = tSimpleIndicator ? AO_IgnoreError : abortOption; + abortOption = tSimpleIndicator ? 
(Uint8) AO_IgnoreError : abortOption; tcKeyReq->setAbortOption(tReqInfo, abortOption); Uint8 tDistrKeyIndicator = theDistrKeyIndicator_; diff --git a/storage/ndb/src/ndbapi/NdbOperationInt.cpp b/storage/ndb/src/ndbapi/NdbOperationInt.cpp index e33e8b09dca..b7fda205450 100644 --- a/storage/ndb/src/ndbapi/NdbOperationInt.cpp +++ b/storage/ndb/src/ndbapi/NdbOperationInt.cpp @@ -1021,8 +1021,8 @@ NdbOperation::branch_col(Uint32 type, bool nopad, Uint32 Label){ DBUG_ENTER("NdbOperation::branch_col"); - DBUG_PRINT("enter", ("type=%u col=%u val=0x%x len=%u label=%u", - type, ColId, val, len, Label)); + DBUG_PRINT("enter", ("type: %u col:%u val: 0x%lx len: %u label: %u", + type, ColId, (long) val, len, Label)); if (val != NULL) DBUG_DUMP("value", (char*)val, len); @@ -1091,53 +1091,61 @@ NdbOperation::branch_col(Uint32 type, int NdbOperation::branch_col_eq(Uint32 ColId, const void * val, Uint32 len, bool nopad, Uint32 Label){ - INT_DEBUG(("branch_col_eq %u %.*s(%u,%d) -> %u", ColId, len, val, len, nopad, Label)); + INT_DEBUG(("branch_col_eq %u %.*s(%u,%d) -> %u", ColId, len, (char*) val, len, + nopad, Label)); return branch_col(Interpreter::EQ, ColId, val, len, nopad, Label); } int NdbOperation::branch_col_ne(Uint32 ColId, const void * val, Uint32 len, bool nopad, Uint32 Label){ - INT_DEBUG(("branch_col_ne %u %.*s(%u,%d) -> %u", ColId, len, val, len, nopad, Label)); + INT_DEBUG(("branch_col_ne %u %.*s(%u,%d) -> %u", ColId, len, (char*) val, len, + nopad, Label)); return branch_col(Interpreter::NE, ColId, val, len, nopad, Label); } int NdbOperation::branch_col_lt(Uint32 ColId, const void * val, Uint32 len, bool nopad, Uint32 Label){ - INT_DEBUG(("branch_col_lt %u %.*s(%u,%d) -> %u", ColId, len, val, len, nopad, Label)); + INT_DEBUG(("branch_col_lt %u %.*s(%u,%d) -> %u", ColId, len, (char*) val, len, + nopad, Label)); return branch_col(Interpreter::LT, ColId, val, len, nopad, Label); } int NdbOperation::branch_col_le(Uint32 ColId, const void * val, Uint32 len, bool nopad, Uint32 Label){ - INT_DEBUG(("branch_col_le %u %.*s(%u,%d) -> %u", ColId, len, val, len, nopad, Label)); + INT_DEBUG(("branch_col_le %u %.*s(%u,%d) -> %u", ColId, len, (char*) val, len, + nopad, Label)); return branch_col(Interpreter::LE, ColId, val, len, nopad, Label); } int NdbOperation::branch_col_gt(Uint32 ColId, const void * val, Uint32 len, bool nopad, Uint32 Label){ - INT_DEBUG(("branch_col_gt %u %.*s(%u,%d) -> %u", ColId, len, val, len, nopad, Label)); + INT_DEBUG(("branch_col_gt %u %.*s(%u,%d) -> %u", ColId, len, (char*) val, len, + nopad, Label)); return branch_col(Interpreter::GT, ColId, val, len, nopad, Label); } int NdbOperation::branch_col_ge(Uint32 ColId, const void * val, Uint32 len, bool nopad, Uint32 Label){ - INT_DEBUG(("branch_col_ge %u %.*s(%u,%d) -> %u", ColId, len, val, len, nopad, Label)); + INT_DEBUG(("branch_col_ge %u %.*s(%u,%d) -> %u", ColId, len, (char*) val, len, + nopad, Label)); return branch_col(Interpreter::GE, ColId, val, len, nopad, Label); } int NdbOperation::branch_col_like(Uint32 ColId, const void * val, Uint32 len, bool nopad, Uint32 Label){ - INT_DEBUG(("branch_col_like %u %.*s(%u,%d) -> %u", ColId, len, val, len, nopad, Label)); + INT_DEBUG(("branch_col_like %u %.*s(%u,%d) -> %u", ColId, len, (char*) val, len, + nopad, Label)); return branch_col(Interpreter::LIKE, ColId, val, len, nopad, Label); } int NdbOperation::branch_col_notlike(Uint32 ColId, const void * val, Uint32 len, bool nopad, Uint32 Label){ - INT_DEBUG(("branch_col_notlike %u %.*s(%u,%d) -> %u", ColId,len,val,len,nopad,Label)); 
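The INT_DEBUG changes in this NdbOperationInt.cpp hunk apply the same rule as most of the logging fixes in the patch: every argument handed to a printf-style call is cast so it matches its conversion specifier exactly, for example a const void* becomes a char* before it meets %.*s and a Uint32 or size_t length is cast to int where an int is expected. A small sketch of the idea follows; the INT_DEBUG macro below is a stand-in that simply forwards to printf, not the real NDB macro, and the function is invented for the example.

#include <cstdio>
#include <cstring>

/* Stand-in for the kernel's printf-style debug macro. */
#define INT_DEBUG(args) std::printf args

static void branch_col_eq(unsigned colId, const void* val, unsigned len) {
  /* %.*s expects an int precision and a char pointer; passing the const void*
     and an unsigned length straight through merely relies on the calling
     convention happening to line up.  The explicit casts make every argument
     match its specifier. */
  INT_DEBUG(("branch_col_eq %u %.*s(%u)\n",
             colId, (int) len, (const char*) val, len));
}

int main() {
  const char key[] = "abcdef";
  branch_col_eq(7, key, (unsigned) std::strlen(key));
  return 0;
}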
+ INT_DEBUG(("branch_col_notlike %u %.*s(%u,%d) -> %u", ColId, len, (char*) val, len, + nopad, Label)); return branch_col(Interpreter::NOT_LIKE, ColId, val, len, nopad, Label); } diff --git a/storage/ndb/src/ndbapi/NdbOperationSearch.cpp b/storage/ndb/src/ndbapi/NdbOperationSearch.cpp index 65d50a55634..e7b8a59d9b2 100644 --- a/storage/ndb/src/ndbapi/NdbOperationSearch.cpp +++ b/storage/ndb/src/ndbapi/NdbOperationSearch.cpp @@ -57,9 +57,9 @@ NdbOperation::equal_impl(const NdbColumnImpl* tAttrInfo, const char* aValuePassed) { DBUG_ENTER("NdbOperation::equal_impl"); - DBUG_PRINT("enter", ("col=%s op=%d val=%p", + DBUG_PRINT("enter", ("col: %s op: %d val: 0x%lx", tAttrInfo->m_name.c_str(), theOperationType, - aValuePassed)); + (long) aValuePassed)); Uint32 tData; const char* aValue = aValuePassed; diff --git a/storage/ndb/src/ndbapi/NdbRecAttr.cpp b/storage/ndb/src/ndbapi/NdbRecAttr.cpp index 5931a00fcf7..edd48f50ce3 100644 --- a/storage/ndb/src/ndbapi/NdbRecAttr.cpp +++ b/storage/ndb/src/ndbapi/NdbRecAttr.cpp @@ -372,7 +372,12 @@ NdbOut& operator<<(NdbOut& out, const NdbRecAttr &r) j = length; } break; - unknown: + + case NdbDictionary::Column::Undefined: + case NdbDictionary::Column::Mediumint: + case NdbDictionary::Column::Mediumunsigned: + case NdbDictionary::Column::Longvarbinary: + unknown: //default: /* no print functions for the rest, just print type */ out << (int) r.getType(); j = length; diff --git a/storage/ndb/src/ndbapi/NdbScanOperation.cpp b/storage/ndb/src/ndbapi/NdbScanOperation.cpp index 2d47f79ee09..3e2081b6018 100644 --- a/storage/ndb/src/ndbapi/NdbScanOperation.cpp +++ b/storage/ndb/src/ndbapi/NdbScanOperation.cpp @@ -181,7 +181,8 @@ NdbScanOperation::readTuples(NdbScanOperation::LockMode lm, } bool rangeScan = false; - if (m_accessTable->m_indexType == NdbDictionary::Index::OrderedIndex) + if ( (int) m_accessTable->m_indexType == + (int) NdbDictionary::Index::OrderedIndex) { if (m_currentTable == m_accessTable){ // Old way of scanning indexes, should not be allowed @@ -588,7 +589,7 @@ err4: theNdbCon->theTransactionIsStarted = false; theNdbCon->theReleaseOnClose = true; - if(DEBUG_NEXT_RESULT) ndbout_c("return -1", retVal); + if(DEBUG_NEXT_RESULT) ndbout_c("return %d", retVal); return -1; } @@ -668,9 +669,9 @@ NdbScanOperation::doSend(int ProcessorId) void NdbScanOperation::close(bool forceSend, bool releaseOp) { DBUG_ENTER("NdbScanOperation::close"); - DBUG_PRINT("enter", ("this=%x tcon=%x con=%x force=%d release=%d", - (UintPtr)this, - (UintPtr)m_transConnection, (UintPtr)theNdbCon, + DBUG_PRINT("enter", ("this: 0x%lx tcon: 0x%lx con: 0x%lx force: %d release: %d", + (long) this, + (long) m_transConnection, (long) theNdbCon, forceSend, releaseOp)); if(m_transConnection){ diff --git a/storage/ndb/src/ndbapi/NdbTransaction.cpp b/storage/ndb/src/ndbapi/NdbTransaction.cpp index 0cbd67a38f6..0c59746c1e9 100644 --- a/storage/ndb/src/ndbapi/NdbTransaction.cpp +++ b/storage/ndb/src/ndbapi/NdbTransaction.cpp @@ -533,8 +533,8 @@ NdbTransaction::executeAsynchPrepare( ExecType aTypeOfExec, AbortOption abortOption) { DBUG_ENTER("NdbTransaction::executeAsynchPrepare"); - DBUG_PRINT("enter", ("aTypeOfExec: %d, aCallback: %x, anyObject: %x", - aTypeOfExec, aCallback, anyObject)); + DBUG_PRINT("enter", ("aTypeOfExec: %d, aCallback: 0x%lx, anyObject: Ox%lx", + aTypeOfExec, (long) aCallback, (long) anyObject)); /** * Reset error.code on execute @@ -1010,7 +1010,7 @@ void NdbTransaction::releaseExecutedScanOperation(NdbIndexScanOperation* cursorOp) { 
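Several of the NdbTransaction and Ndb trace changes around this point replace a pointer printed with %x by a pointer cast to (long) or (ulong) and printed with 0x%lx. On the LP64 platforms these builds target, a pointer is 64 bits while %x consumes only an unsigned int, so the old calls were undefined and truncated the value in practice. The sketch below shows the cast-to-long convention used in the patch next to the standard %p alternative; the NdbTransactionStub type is invented for the example.

#include <cstdio>

/* Invented stand-in type; any object pointer shows the same issue. */
struct NdbTransactionStub { int dummy; };

int main() {
  NdbTransactionStub trans;
  NdbTransactionStub* aConnection = &trans;

  /* Casting to unsigned long (spelled (ulong) in the patch) and printing with
     0x%lx keeps the whole pointer value on LP64 targets, where long is as wide
     as a pointer. */
  std::printf("close trans: 0x%lx\n", (unsigned long) aConnection);

  /* The fully portable alternative is %p with a void* argument. */
  std::printf("close trans: %p\n", (void*) aConnection);
  return 0;
}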
DBUG_ENTER("NdbTransaction::releaseExecutedScanOperation"); - DBUG_PRINT("enter", ("this=0x%x op=0x%x", (UintPtr)this, (UintPtr)cursorOp)); + DBUG_PRINT("enter", ("this: 0x%lx op: 0x%lx", (ulong) this, (ulong) cursorOp)); releaseScanOperation(&m_firstExecutedScanOp, 0, cursorOp); diff --git a/storage/ndb/src/ndbapi/Ndbif.cpp b/storage/ndb/src/ndbapi/Ndbif.cpp index 5683ebe2e6f..599a38b287d 100644 --- a/storage/ndb/src/ndbapi/Ndbif.cpp +++ b/storage/ndb/src/ndbapi/Ndbif.cpp @@ -199,11 +199,11 @@ void Ndb::connected(Uint32 ref) ((Uint64)tmpTheNode << 40); theFirstTransId += theFacade->m_max_trans_id; // assert(0); - DBUG_PRINT("info",("connected with ref=%x, id=%d, no_db_nodes=%d, first_trans_id=%lx", + DBUG_PRINT("info",("connected with ref=%x, id=%d, no_db_nodes=%d, first_trans_id: 0x%lx", theMyRef, tmpTheNode, theImpl->theNoOfDBnodes, - theFirstTransId)); + (long) theFirstTransId)); theCommitAckSignal = new NdbApiSignal(theMyRef); theDictionary->m_receiver.m_reference= theMyRef; diff --git a/storage/ndb/src/ndbapi/Ndbinit.cpp b/storage/ndb/src/ndbapi/Ndbinit.cpp index 3d7d1b768f2..de67b99c8d8 100644 --- a/storage/ndb/src/ndbapi/Ndbinit.cpp +++ b/storage/ndb/src/ndbapi/Ndbinit.cpp @@ -43,7 +43,7 @@ Ndb::Ndb( Ndb_cluster_connection *ndb_cluster_connection, : theImpl(NULL) { DBUG_ENTER("Ndb::Ndb()"); - DBUG_PRINT("enter",("Ndb::Ndb this=0x%x", this)); + DBUG_PRINT("enter",("Ndb::Ndb this: 0x%lx", (long) this)); setup(ndb_cluster_connection, aDataBase, aSchema); DBUG_VOID_RETURN; } @@ -132,7 +132,7 @@ void Ndb::setup(Ndb_cluster_connection *ndb_cluster_connection, Ndb::~Ndb() { DBUG_ENTER("Ndb::~Ndb()"); - DBUG_PRINT("enter",("this=0x%x",this)); + DBUG_PRINT("enter",("this: 0x%lx", (long) this)); if (m_sys_tab_0) getDictionary()->removeTableGlobal(*m_sys_tab_0, 0); diff --git a/storage/ndb/src/ndbapi/Ndblist.cpp b/storage/ndb/src/ndbapi/Ndblist.cpp index f82348fc91d..a0d22466db4 100644 --- a/storage/ndb/src/ndbapi/Ndblist.cpp +++ b/storage/ndb/src/ndbapi/Ndblist.cpp @@ -361,7 +361,7 @@ void Ndb::releaseScanOperation(NdbIndexScanOperation* aScanOperation) { DBUG_ENTER("Ndb::releaseScanOperation"); - DBUG_PRINT("enter", ("op=%x", (UintPtr)aScanOperation)); + DBUG_PRINT("enter", ("op: 0x%lx", (ulong) aScanOperation)); #ifdef ndb_release_check_dup { NdbIndexScanOperation* tOp = theScanOpIdleList; while (tOp != NULL) { diff --git a/storage/ndb/src/ndbapi/ObjectMap.hpp b/storage/ndb/src/ndbapi/ObjectMap.hpp index e3db479f677..b211e2956dd 100644 --- a/storage/ndb/src/ndbapi/ObjectMap.hpp +++ b/storage/ndb/src/ndbapi/ObjectMap.hpp @@ -84,7 +84,7 @@ NdbObjectIdMap::map(void * object){ // unlock(); - DBUG_PRINT("info",("NdbObjectIdMap::map(0x%x) %u", object, ff<<2)); + DBUG_PRINT("info",("NdbObjectIdMap::map(0x%lx) %u", (long) object, ff<<2)); return ff<<2; } @@ -102,14 +102,16 @@ NdbObjectIdMap::unmap(Uint32 id, void *object){ m_map[i].m_next = m_firstFree; m_firstFree = i; } else { - ndbout_c("Error: NdbObjectIdMap::::unmap(%u, 0x%x) obj=0x%x", id, object, obj); - DBUG_PRINT("error",("NdbObjectIdMap::unmap(%u, 0x%x) obj=0x%x", id, object, obj)); + ndbout_c("Error: NdbObjectIdMap::::unmap(%u, 0x%lx) obj=0x%lx", + id, (long) object, (long) obj); + DBUG_PRINT("error",("NdbObjectIdMap::unmap(%u, 0x%lx) obj=0x%lx", + id, (long) object, (long) obj)); return 0; } // unlock(); - DBUG_PRINT("info",("NdbObjectIdMap::unmap(%u) obj=0x%x", id, obj)); + DBUG_PRINT("info",("NdbObjectIdMap::unmap(%u) obj=0x%lx", id, (long) obj)); return obj; } diff --git a/storage/ndb/src/ndbapi/ndb_cluster_connection.cpp 
diff --git a/storage/ndb/src/ndbapi/ndb_cluster_connection.cpp b/storage/ndb/src/ndbapi/ndb_cluster_connection.cpp
index 501901bf8ba..3bb6b2fe414 100644
--- a/storage/ndb/src/ndbapi/ndb_cluster_connection.cpp
+++ b/storage/ndb/src/ndbapi/ndb_cluster_connection.cpp
@@ -271,7 +271,7 @@ Ndb_cluster_connection_impl::Ndb_cluster_connection_impl(const char *
     m_latest_trans_gci(0)
 {
   DBUG_ENTER("Ndb_cluster_connection");
-  DBUG_PRINT("enter",("Ndb_cluster_connection this=0x%x", this));
+  DBUG_PRINT("enter",("Ndb_cluster_connection this=0x%lx", (long) this));
 
   if (!m_event_add_drop_mutex)
     m_event_add_drop_mutex= NdbMutex_Create();
diff --git a/storage/ndb/tools/desc.cpp b/storage/ndb/tools/desc.cpp
index c042f745d9d..2a91d3215f5 100644
--- a/storage/ndb/tools/desc.cpp
+++ b/storage/ndb/tools/desc.cpp
@@ -131,7 +131,7 @@ int desc_logfilegroup(Ndb *myndb, char* name)
   assert(dict);
   NdbDictionary::LogfileGroup lfg= dict->getLogfileGroup(name);
   NdbError err= dict->getNdbError();
-  if(err.classification!=ndberror_cl_none)
+  if( (int) err.classification != (int) ndberror_cl_none)
     return 0;
 
   ndbout << "Type: LogfileGroup" << endl;
@@ -153,7 +153,7 @@ int desc_tablespace(Ndb *myndb, char* name)
   assert(dict);
   NdbDictionary::Tablespace ts= dict->getTablespace(name);
   NdbError err= dict->getNdbError();
-  if(err.classification!=ndberror_cl_none)
+  if ((int) err.classification != (int) ndberror_cl_none)
     return 0;
 
   ndbout << "Type: Tablespace" << endl;
@@ -175,11 +175,11 @@ int desc_undofile(Ndb_cluster_connection &con, Ndb *myndb, char* name)
 
   con.init_get_next_node(iter);
 
-  while(id= con.get_next_node(iter))
+  while ((id= con.get_next_node(iter)))
   {
     NdbDictionary::Undofile uf= dict->getUndofile(0, name);
     NdbError err= dict->getNdbError();
-    if(err.classification!=ndberror_cl_none)
+    if ((int) err.classification != (int) ndberror_cl_none)
       return 0;
 
     ndbout << "Type: Undofile" << endl;
@@ -211,11 +211,11 @@ int desc_datafile(Ndb_cluster_connection &con, Ndb *myndb, char* name)
 
   con.init_get_next_node(iter);
 
-  while(id= con.get_next_node(iter))
+  while ((id= con.get_next_node(iter)))
   {
     NdbDictionary::Datafile df= dict->getDatafile(id, name);
     NdbError err= dict->getNdbError();
-    if(err.classification!=ndberror_cl_none)
+    if ((int) err.classification != (int) ndberror_cl_none)
       return 0;
 
     ndbout << "Type: Datafile" << endl;
diff --git a/storage/ndb/tools/drop_index.cpp b/storage/ndb/tools/drop_index.cpp
index aa207212dbe..c10211a9108 100644
--- a/storage/ndb/tools/drop_index.cpp
+++ b/storage/ndb/tools/drop_index.cpp
@@ -51,9 +51,6 @@ int main(int argc, char** argv){
   NDB_INIT(argv[0]);
   load_defaults("my",load_default_groups,&argc,&argv);
   int ho_error;
-#ifndef DBUG_OFF
-  "d:t:O,/tmp/ndb_drop_index.trace";
-#endif
   if ((ho_error=handle_options(&argc, &argv, my_long_options,
                                ndb_std_get_one_option)))
     return NDBT_ProgramExit(NDBT_WRONGARGS);
diff --git a/storage/ndb/tools/drop_tab.cpp b/storage/ndb/tools/drop_tab.cpp
index d14c60a2c6d..61df4ee9b34 100644
--- a/storage/ndb/tools/drop_tab.cpp
+++ b/storage/ndb/tools/drop_tab.cpp
@@ -51,9 +51,6 @@ int main(int argc, char** argv){
   NDB_INIT(argv[0]);
   load_defaults("my",load_default_groups,&argc,&argv);
   int ho_error;
-#ifndef DBUG_OFF
-  "d:t:O,/tmp/ndb_drop_table.trace";
-#endif
   if ((ho_error=handle_options(&argc, &argv, my_long_options,
                                ndb_std_get_one_option)))
     return NDBT_ProgramExit(NDBT_WRONGARGS);
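Two small warning fixes recur in the desc.cpp hunks above: an assignment used as a loop condition gets an extra pair of parentheses, and values of unrelated enum types are compared through explicit int casts. A minimal sketch, with next_node() and the two enums as stand-ins rather than the real NDB API:

/* next_node() and the enums below are invented for illustration. */
enum ErrClass   { CL_NONE, CL_TEMPORARY };
enum OtherClass { OC_NONE, OC_PERMANENT };

static unsigned next_node(unsigned &state)
{
  return state ? state-- : 0;      // yields 3, 2, 1, then 0 to end the loop
}

int main()
{
  unsigned state = 3, id;
  while ((id = next_node(state)))  // extra parens: assignment intended, not ==
  {
    (void) id;                     // visit node `id` here
  }

  ErrClass e = CL_NONE;
  if ((int) e != (int) OC_NONE)    // explicit casts avoid the warning about
    return 1;                      // comparing two unrelated enum types
  return 0;
}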
diff --git a/storage/ndb/tools/ndb_condig.cpp b/storage/ndb/tools/ndb_condig.cpp
index 8b862391c8e..049e4599447 100644
--- a/storage/ndb/tools/ndb_condig.cpp
+++ b/storage/ndb/tools/ndb_condig.cpp
@@ -114,6 +114,7 @@ struct Match
   int m_key;
   BaseString m_value;
   virtual int eval(const Iter&);
+  virtual ~Match() {}
 };
 
 struct HostMatch : public Match
@@ -127,6 +128,7 @@ struct Apply
   Apply(int val) { m_key = val;}
   int m_key;
   virtual int apply(const Iter&);
+  virtual ~Apply() {}
 };
 
 struct NodeTypeApply : public Apply
diff --git a/storage/ndb/tools/restore/Restore.cpp b/storage/ndb/tools/restore/Restore.cpp
index ef535cf9e26..b51760266cb 100644
--- a/storage/ndb/tools/restore/Restore.cpp
+++ b/storage/ndb/tools/restore/Restore.cpp
@@ -300,7 +300,13 @@ RestoreMetaData::markSysTables()
         strcmp(tableName, "NDB$EVENTS_0") == 0 ||
         strcmp(tableName, "sys/def/SYSTAB_0") == 0 ||
         strcmp(tableName, "sys/def/NDB$EVENTS_0") == 0 ||
+        /*
+          The following is for old MySQL versions,
+          before we changed the database name of the tables from
+          "cluster_replication" -> "cluster" -> "mysql"
+        */
         strcmp(tableName, "cluster_replication/def/" NDB_APPLY_TABLE) == 0 ||
+        strcmp(tableName, "cluster/def/" NDB_APPLY_TABLE) == 0 ||
         strcmp(tableName, NDB_REP_DB "/def/" NDB_APPLY_TABLE) == 0 ||
         strcmp(tableName, NDB_REP_DB "/def/" NDB_SCHEMA_TABLE)== 0 )
       table->isSysTable = true;
@@ -317,7 +323,7 @@ RestoreMetaData::markSysTables()
       Uint32 j;
       for (j = 0; j < getNoOfTables(); j++) {
         TableS* table = allTables[j];
-        if (table->getTableId() == id1) {
+        if (table->getTableId() == (Uint32) id1) {
           if (table->isSysTable)
             blobTable->isSysTable = true;
           break;
diff --git a/storage/ndb/tools/restore/consumer_restore.cpp b/storage/ndb/tools/restore/consumer_restore.cpp
index 507058e2743..7524558a2d6 100644
--- a/storage/ndb/tools/restore/consumer_restore.cpp
+++ b/storage/ndb/tools/restore/consumer_restore.cpp
@@ -494,7 +494,7 @@ BackupRestore::object(Uint32 type, const void * ptr)
     NdbDictionary::Tablespace curr = dict->getTablespace(old.getName());
 
     NdbError errobj = dict->getNdbError();
-    if(errobj.classification == ndberror_cl_none)
+    if ((int) errobj.classification == (int) ndberror_cl_none)
     {
       NdbDictionary::Tablespace* currptr = new NdbDictionary::Tablespace(curr);
       NdbDictionary::Tablespace * null = 0;
@@ -533,7 +533,7 @@ BackupRestore::object(Uint32 type, const void * ptr)
     NdbDictionary::LogfileGroup curr = dict->getLogfileGroup(old.getName());
 
     NdbError errobj = dict->getNdbError();
-    if(errobj.classification == ndberror_cl_none)
+    if ((int) errobj.classification == (int) ndberror_cl_none)
     {
       NdbDictionary::LogfileGroup* currptr =
         new NdbDictionary::LogfileGroup(curr);
@@ -680,7 +680,7 @@ BackupRestore::table(const TableS & table){
     return true;
 
   const NdbTableImpl & tmptab = NdbTableImpl::getImpl(* table.m_dictTable);
-  if(tmptab.m_indexType != NdbDictionary::Index::Undefined){
+  if ((int) tmptab.m_indexType != (int) NdbDictionary::Index::Undefined){
     m_indexes.push_back(table.m_dictTable);
     return true;
   }
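The ndb_condig.cpp hunk above adds empty virtual destructors to Match and Apply, which already carry virtual member functions. A skeletal sketch (the members here are stand-ins, not the tool's real fields) of why a polymorphic base needs one when derived objects are deleted through a base pointer:

#include <string>

struct Match {
  int m_key;
  std::string m_value;
  virtual int eval() { return 0; }
  virtual ~Match() {}              // without this, the delete below is undefined behaviour
};

struct HostMatch : public Match {
  std::string m_host;              // derived state that must also be destroyed
  virtual int eval() { return m_host.empty() ? 0 : 1; }
};

int main()
{
  Match *m = new HostMatch();
  int rc = m->eval();
  delete m;                        // safe only because ~Match() is virtual
  return rc;
}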