author | Konstantin Osipov <kostja@sun.com> | 2010-07-27 18:32:42 +0400
---|---|---
committer | Konstantin Osipov <kostja@sun.com> | 2010-07-27 18:32:42 +0400
commit | 2abe7b9d4ee96919cc0504be3805c8712a46a532 (patch) |
tree | f5b4dd56c20ccff8f87fcf2ff244d4be368ac972 /storage |
parent | 6d059673f7dc1dbff5e154b0ca6d1ef2f0fa3cc3 (diff) |
parent | c6a34a99616c749c1d4874c9e7f7424fd2765de6 (diff) |
download | mariadb-git-2abe7b9d4ee96919cc0504be3805c8712a46a532.tar.gz |
Merge trunk-bugfixing -> trunk-runtime.
Diffstat (limited to 'storage')
60 files changed, 328 insertions, 603 deletions
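
Much of the storage-engine churn below is one mechanical substitution: the legacy memcpy_fixed() wrapper is replaced by plain memcpy() wherever a blob column's embedded data pointer is copied to or from the record buffer (ha_tina, ha_federated, ha_heap and the MyISAM files). A minimal sketch of that pattern, assuming a simplified row layout rather than the real Field_blob/MI_BLOB structures:

```c
#include <string.h>

/* Illustrative layout only: a blob column is stored in the row as
   <pack_length length bytes><raw char* to the blob data>.  Copying the
   pointer byte-wise with memcpy() sidesteps alignment problems; the old
   memcpy_fixed() call was effectively an alias for the same operation. */
static char *get_blob_ptr(const unsigned char *blob_field, unsigned pack_length)
{
  char *data;
  memcpy(&data, blob_field + pack_length, sizeof(char *));  /* was memcpy_fixed() */
  return data;
}

static void set_blob_ptr(unsigned char *blob_field, unsigned pack_length,
                         char *data)
{
  memcpy(blob_field + pack_length, &data, sizeof(char *));  /* was memcpy_fixed() */
}
```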
diff --git a/storage/csv/ha_tina.cc b/storage/csv/ha_tina.cc index 30c4c4d58ca..216097f0fdc 100644 --- a/storage/csv/ha_tina.cc +++ b/storage/csv/ha_tina.cc @@ -807,15 +807,15 @@ int ha_tina::find_current_row(uchar *buf) Field_blob *blob= *(Field_blob**) field; uchar *src, *tgt; uint length, packlength; - + packlength= blob->pack_length_no_ptr(); length= blob->get_length(blob->ptr); - memcpy_fixed(&src, blob->ptr + packlength, sizeof(char*)); + memcpy(&src, blob->ptr + packlength, sizeof(char*)); if (src) { tgt= (uchar*) alloc_root(&blobroot, length); bmove(tgt, src, length); - memcpy_fixed(blob->ptr + packlength, &tgt, sizeof(char*)); + memcpy(blob->ptr + packlength, &tgt, sizeof(char*)); } } } diff --git a/storage/federated/ha_federated.cc b/storage/federated/ha_federated.cc index d17b56bd4b1..b1ae276dce8 100644 --- a/storage/federated/ha_federated.cc +++ b/storage/federated/ha_federated.cc @@ -2752,9 +2752,9 @@ void ha_federated::position(const uchar *record __attribute__ ((unused))) position_called= TRUE; /* Store result set address. */ - memcpy_fixed(ref, &stored_result, sizeof(MYSQL_RES *)); + memcpy(ref, &stored_result, sizeof(MYSQL_RES *)); /* Store data cursor position. */ - memcpy_fixed(ref + sizeof(MYSQL_RES *), ¤t_position, + memcpy(ref + sizeof(MYSQL_RES *), ¤t_position, sizeof(MYSQL_ROW_OFFSET)); DBUG_VOID_RETURN; } @@ -2780,11 +2780,11 @@ int ha_federated::rnd_pos(uchar *buf, uchar *pos) ha_statistic_increment(&SSV::ha_read_rnd_count); /* Get stored result set. */ - memcpy_fixed(&result, pos, sizeof(MYSQL_RES *)); + memcpy(&result, pos, sizeof(MYSQL_RES *)); DBUG_ASSERT(result); /* Set data cursor position. */ - memcpy_fixed(&result->data_cursor, pos + sizeof(MYSQL_RES *), - sizeof(MYSQL_ROW_OFFSET)); + memcpy(&result->data_cursor, pos + sizeof(MYSQL_RES *), + sizeof(MYSQL_ROW_OFFSET)); /* Read a row. */ ret_val= read_next(buf, result); MYSQL_READ_ROW_DONE(ret_val); diff --git a/storage/heap/ha_heap.cc b/storage/heap/ha_heap.cc index 350958f8230..481257def1d 100644 --- a/storage/heap/ha_heap.cc +++ b/storage/heap/ha_heap.cc @@ -390,7 +390,7 @@ int ha_heap::rnd_pos(uchar * buf, uchar *pos) MYSQL_READ_ROW_START(table_share->db.str, table_share->table_name.str, FALSE); ha_statistic_increment(&SSV::ha_read_rnd_count); - memcpy_fixed((char*) &heap_position, pos, sizeof(HEAP_PTR)); + memcpy(&heap_position, pos, sizeof(HEAP_PTR)); error=heap_rrnd(file, buf, heap_position); table->status=error ? 
STATUS_NOT_FOUND: 0; MYSQL_READ_ROW_DONE(error); @@ -654,7 +654,7 @@ heap_prepare_hp_create_info(TABLE *table_arg, bool internal_table, parts * sizeof(HA_KEYSEG), MYF(MY_WME)))) return my_errno; - seg= my_reinterpret_cast(HA_KEYSEG*) (keydef + keys); + seg= reinterpret_cast<HA_KEYSEG*>(keydef + keys); for (key= 0; key < keys; key++) { KEY *pos= table_arg->key_info+key; diff --git a/storage/heap/hp_test2.c b/storage/heap/hp_test2.c index 1571fc98402..3ee903be7ed 100644 --- a/storage/heap/hp_test2.c +++ b/storage/heap/hp_test2.c @@ -179,11 +179,6 @@ int main(int argc, char *argv[]) printf("can't find key1: \"%s\"\n",(char*) key); goto err; } -#ifdef NOT_USED - if (file->current_ptr == hp_find_block(&file->s->block,0) || - file->current_ptr == hp_find_block(&file->s->block,1)) - continue; /* Don't remove 2 first records */ -#endif if (heap_delete(file,record)) { printf("error: %d; can't delete record: \"%s\"\n", my_errno,(char*) record); diff --git a/storage/innobase/dict/dict0crea.c b/storage/innobase/dict/dict0crea.c index f185371bfca..09353c45c8c 100644 --- a/storage/innobase/dict/dict0crea.c +++ b/storage/innobase/dict/dict0crea.c @@ -240,17 +240,29 @@ dict_build_table_def_step( ibool is_path; mtr_t mtr; ulint space = 0; + ibool file_per_table; ut_ad(mutex_own(&(dict_sys->mutex))); table = node->table; - dict_hdr_get_new_id(&table->id, NULL, - srv_file_per_table ? &space : NULL); + /* Cache the global variable "srv_file_per_table" to + a local variable before using it. Please note + "srv_file_per_table" is not under dict_sys mutex + protection, and could be changed while executing + this function. So better to cache the current value + to a local variable, and all future reference to + "srv_file_per_table" should use this local variable. */ + file_per_table = srv_file_per_table; + + dict_hdr_get_new_id(&table->id, NULL, NULL); thr_get_trx(thr)->table_id = table->id; - if (srv_file_per_table) { + if (file_per_table) { + /* Get a new space id if srv_file_per_table is set */ + dict_hdr_get_new_id(NULL, NULL, &space); + if (UNIV_UNLIKELY(space == ULINT_UNDEFINED)) { return(DB_ERROR); } diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index f5227add6f1..716b7bbbd56 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -4128,6 +4128,11 @@ get_innobase_type_from_mysql_type( case MYSQL_TYPE_BLOB: case MYSQL_TYPE_LONG_BLOB: return(DATA_BLOB); + case MYSQL_TYPE_NULL: + /* MySQL currently accepts "NULL" datatype, but will + reject such datatype in the next release. We will cope + with it and not trigger assertion failure in 5.1 */ + break; default: ut_error; } @@ -6175,7 +6180,22 @@ create_table_def( field = form->field[i]; col_type = get_innobase_type_from_mysql_type(&unsigned_type, - field); + field); + + if (!col_type) { + push_warning_printf( + (THD*) trx->mysql_thd, + MYSQL_ERROR::WARN_LEVEL_WARN, + ER_CANT_CREATE_TABLE, + "Error creating table '%s' with " + "column '%s'. 
Please check its " + "column type and try to re-create " + "the table with an appropriate " + "column type.", + table->name, (char*) field->field_name); + goto err_col; + } + if (field->null_ptr) { nulls_allowed = 0; } else { @@ -6233,7 +6253,7 @@ create_table_def( if (dict_col_name_is_reserved(field->field_name)){ my_error(ER_WRONG_COLUMN_NAME, MYF(0), field->field_name); - +err_col: dict_mem_table_free(table); trx_commit_for_mysql(trx); diff --git a/storage/innobase/os/os0file.c b/storage/innobase/os/os0file.c index 7c502d616d3..a7117386608 100644 --- a/storage/innobase/os/os0file.c +++ b/storage/innobase/os/os0file.c @@ -1445,7 +1445,11 @@ try_again: /* When srv_file_per_table is on, file creation failure may not be critical to the whole instance. Do not crash the server in - case of unknown errors. */ + case of unknown errors. + Please note "srv_file_per_table" is a global variable with + no explicit synchronization protection. It could be + changed during this execution path. It might not have the + same value as the one when building the table definition */ if (srv_file_per_table) { retry = os_file_handle_error_no_exit(name, create_mode == OS_FILE_CREATE ? @@ -1532,7 +1536,11 @@ try_again: /* When srv_file_per_table is on, file creation failure may not be critical to the whole instance. Do not crash the server in - case of unknown errors. */ + case of unknown errors. + Please note "srv_file_per_table" is a global variable with + no explicit synchronization protection. It could be + changed during this execution path. It might not have the + same value as the one when building the table definition */ if (srv_file_per_table) { retry = os_file_handle_error_no_exit(name, create_mode == OS_FILE_CREATE ? diff --git a/storage/myisam/ft_boolean_search.c b/storage/myisam/ft_boolean_search.c index 33c1e092a00..b54b4c6ce49 100644 --- a/storage/myisam/ft_boolean_search.c +++ b/storage/myisam/ft_boolean_search.c @@ -92,6 +92,8 @@ static double *nwghts=_nwghts+5; /* nwghts[i] = -0.5*1.5**i */ #define FTB_FLAG_NO 4 #define FTB_FLAG_WONLY 8 +#define CMP_NUM(a,b) (((a) < (b)) ? -1 : ((a) == (b)) ? 0 : 1) + typedef struct st_ftb_expr FTB_EXPR; struct st_ftb_expr { diff --git a/storage/myisam/ft_nlq_search.c b/storage/myisam/ft_nlq_search.c index 7d9b13b7714..937bb6ffe19 100644 --- a/storage/myisam/ft_nlq_search.c +++ b/storage/myisam/ft_nlq_search.c @@ -197,7 +197,8 @@ static int walk_and_push(FT_SUPERDOC *from, static int FT_DOC_cmp(void *unused __attribute__((unused)), FT_DOC *a, FT_DOC *b) { - return sgn(b->weight - a->weight); + double c= b->weight - a->weight; + return ((c < 0) ? -1 : (c > 0) ? 
1 : 0); } diff --git a/storage/myisam/ft_parser.c b/storage/myisam/ft_parser.c index 8f5779b04ee..663d7869f71 100644 --- a/storage/myisam/ft_parser.c +++ b/storage/myisam/ft_parser.c @@ -39,7 +39,7 @@ static int walk_and_copy(FT_WORD *word,uint32 count,FT_DOCSTAT *docstat) { word->weight=LWS_IN_USE; docstat->sum+=word->weight; - memcpy_fixed((docstat->list)++,word,sizeof(FT_WORD)); + memcpy((docstat->list)++, word, sizeof(FT_WORD)); return 0; } diff --git a/storage/myisam/ft_update.c b/storage/myisam/ft_update.c index d1548e32870..a2ddb49ecf0 100644 --- a/storage/myisam/ft_update.c +++ b/storage/myisam/ft_update.c @@ -83,8 +83,7 @@ uint _mi_ft_segiterator(register FT_SEG_ITERATOR *ftsi) if (ftsi->seg->flag & HA_BLOB_PART) { ftsi->len=_mi_calc_blob_length(ftsi->seg->bit_start,ftsi->pos); - memcpy_fixed((char*) &ftsi->pos, ftsi->pos+ftsi->seg->bit_start, - sizeof(char*)); + memcpy(&ftsi->pos, ftsi->pos+ftsi->seg->bit_start, sizeof(char*)); DBUG_RETURN(1); } ftsi->len=ftsi->seg->length; diff --git a/storage/myisam/mi_check.c b/storage/myisam/mi_check.c index 13427130069..6bf01cd63c7 100644 --- a/storage/myisam/mi_check.c +++ b/storage/myisam/mi_check.c @@ -52,11 +52,6 @@ #endif #include "rt_index.h" -#ifndef USE_RAID -#define my_raid_create(K, A, B, C, D, E, F, G) mysql_file_create(K, A, B, C, G) -#define my_raid_delete(K, A, B, C) mysql_file_delete(K, A, B) -#endif - /* Functions defined in this file */ static int check_k_link(MI_CHECK *param, MI_INFO *info,uint nr); @@ -1577,15 +1572,12 @@ int mi_repair(MI_CHECK *param, register MI_INFO *info, if (!rep_quick) { /* Get real path for data file */ - if ((new_file= my_raid_create(mi_key_file_datatmp, - fn_format(param->temp_filename, - share->data_file_name, "", - DATA_TMP_EXT, 2+4), - 0, param->tmpfile_createflag, - share->base.raid_type, - share->base.raid_chunks, - share->base.raid_chunksize, - MYF(0))) < 0) + if ((new_file= mysql_file_create(mi_key_file_datatmp, + fn_format(param->temp_filename, + share->data_file_name, "", + DATA_TMP_EXT, 2+4), + 0, param->tmpfile_createflag, + MYF(0))) < 0) { mi_check_print_error(param,"Can't create new tempfile: '%s'", param->temp_filename); @@ -1751,8 +1743,7 @@ err: (size_t) info->s->mmaped_length); info->s->file_map= NULL; } - if (change_to_newfile(share->data_file_name,MI_NAME_DEXT, - DATA_TMP_EXT, share->base.raid_chunks, + if (change_to_newfile(share->data_file_name, MI_NAME_DEXT, DATA_TMP_EXT, (param->testflag & T_BACKUP_DATA ? 
MYF(MY_REDEL_MAKE_BACKUP): MYF(0))) || mi_open_datafile(info,share,name,-1)) @@ -1767,9 +1758,8 @@ err: if (new_file >= 0) { (void) mysql_file_close(new_file, MYF(0)); - (void) my_raid_delete(mi_key_file_datatmp, - param->temp_filename, info->s->base.raid_chunks, - MYF(MY_WME)); + (void) mysql_file_delete(mi_key_file_datatmp, + param->temp_filename, MYF(MY_WME)); info->rec_cache.file=-1; /* don't flush data to new_file, it's closed */ } mi_mark_crashed_on_repair(info); @@ -2011,7 +2001,7 @@ int mi_sort_index(MI_CHECK *param, register MI_INFO *info, char * name) (void) mysql_file_close(share->kfile, MYF(MY_WME)); share->kfile = -1; (void) mysql_file_close(new_file, MYF(MY_WME)); - if (change_to_newfile(share->index_file_name,MI_NAME_IEXT,INDEX_TMP_EXT,0, + if (change_to_newfile(share->index_file_name, MI_NAME_IEXT, INDEX_TMP_EXT, MYF(0)) || mi_open_keyfile(share)) goto err2; @@ -2141,18 +2131,9 @@ err: */ int change_to_newfile(const char * filename, const char * old_ext, - const char * new_ext, - uint raid_chunks __attribute__((unused)), - myf MyFlags) + const char * new_ext, myf MyFlags) { char old_filename[FN_REFLEN],new_filename[FN_REFLEN]; -#ifdef USE_RAID - if (raid_chunks) - return my_raid_redel(fn_format(old_filename,filename,"",old_ext,2+4), - fn_format(new_filename,filename,"",new_ext,2+4), - raid_chunks, - MYF(MY_WME | MY_LINK_WARNING | MyFlags)); -#endif /* Get real path to filename */ (void) fn_format(old_filename,filename,"",old_ext,2+4+32); return my_redel(old_filename, @@ -2293,15 +2274,12 @@ int mi_repair_by_sort(MI_CHECK *param, register MI_INFO *info, if (!rep_quick) { /* Get real path for data file */ - if ((new_file= my_raid_create(mi_key_file_datatmp, - fn_format(param->temp_filename, - share->data_file_name, "", - DATA_TMP_EXT, 2+4), - 0, param->tmpfile_createflag, - share->base.raid_type, - share->base.raid_chunks, - share->base.raid_chunksize, - MYF(0))) < 0) + if ((new_file= mysql_file_create(mi_key_file_datatmp, + fn_format(param->temp_filename, + share->data_file_name, "", + DATA_TMP_EXT, 2+4), + 0, param->tmpfile_createflag, + MYF(0))) < 0) { mi_check_print_error(param,"Can't create new tempfile: '%s'", param->temp_filename); @@ -2527,7 +2505,7 @@ int mi_repair_by_sort(MI_CHECK *param, register MI_INFO *info, skr < share->base.reloc*share->base.min_pack_length) skr=share->base.reloc*share->base.min_pack_length; #endif - if (skr != sort_info.filelength && !info->s->base.raid_type) + if (skr != sort_info.filelength) if (mysql_file_chsize(info->dfile, skr, 0, MYF(0))) mi_check_print_warning(param, "Can't change size of datafile, error: %d", @@ -2565,8 +2543,7 @@ err: { mysql_file_close(new_file, MYF(0)); info->dfile=new_file= -1; - if (change_to_newfile(share->data_file_name,MI_NAME_DEXT, - DATA_TMP_EXT, share->base.raid_chunks, + if (change_to_newfile(share->data_file_name,MI_NAME_DEXT, DATA_TMP_EXT, (param->testflag & T_BACKUP_DATA ? 
MYF(MY_REDEL_MAKE_BACKUP): MYF(0))) || mi_open_datafile(info,share,name,-1)) @@ -2580,9 +2557,8 @@ err: if (new_file >= 0) { (void) mysql_file_close(new_file, MYF(0)); - (void) my_raid_delete(mi_key_file_datatmp, - param->temp_filename, share->base.raid_chunks, - MYF(MY_WME)); + (void) mysql_file_delete(mi_key_file_datatmp, + param->temp_filename, MYF(MY_WME)); if (info->dfile == new_file) /* Retry with key cache */ if (unlikely(mi_open_datafile(info, share, name, -1))) param->retry_repair= 0; /* Safety */ @@ -2751,16 +2727,12 @@ int mi_repair_parallel(MI_CHECK *param, register MI_INFO *info, if (!rep_quick) { /* Get real path for data file */ - if ((new_file= my_raid_create(mi_key_file_datatmp, - fn_format(param->temp_filename, - share->data_file_name, "", - DATA_TMP_EXT, - 2+4), - 0, param->tmpfile_createflag, - share->base.raid_type, - share->base.raid_chunks, - share->base.raid_chunksize, - MYF(0))) < 0) + if ((new_file= mysql_file_create(mi_key_file_datatmp, + fn_format(param->temp_filename, + share->data_file_name, "", + DATA_TMP_EXT, 2+4), + 0, param->tmpfile_createflag, + MYF(0))) < 0) { mi_check_print_error(param,"Can't create new tempfile: '%s'", param->temp_filename); @@ -3055,7 +3027,7 @@ int mi_repair_parallel(MI_CHECK *param, register MI_INFO *info, skr < share->base.reloc*share->base.min_pack_length) skr=share->base.reloc*share->base.min_pack_length; #endif - if (skr != sort_info.filelength && !info->s->base.raid_type) + if (skr != sort_info.filelength) if (mysql_file_chsize(info->dfile, skr, 0, MYF(0))) mi_check_print_warning(param, "Can't change size of datafile, error: %d", @@ -3105,8 +3077,7 @@ err: { mysql_file_close(new_file, MYF(0)); info->dfile=new_file= -1; - if (change_to_newfile(share->data_file_name,MI_NAME_DEXT, - DATA_TMP_EXT, share->base.raid_chunks, + if (change_to_newfile(share->data_file_name, MI_NAME_DEXT, DATA_TMP_EXT, (param->testflag & T_BACKUP_DATA ? 
MYF(MY_REDEL_MAKE_BACKUP): MYF(0))) || mi_open_datafile(info,share,name,-1)) @@ -3120,9 +3091,8 @@ err: if (new_file >= 0) { (void) mysql_file_close(new_file, MYF(0)); - (void) my_raid_delete(mi_key_file_datatmp, - param->temp_filename, share->base.raid_chunks, - MYF(MY_WME)); + (void) mysql_file_delete(mi_key_file_datatmp, + param->temp_filename, MYF(MY_WME)); if (info->dfile == new_file) /* Retry with key cache */ if (unlikely(mi_open_datafile(info, share, name, -1))) param->retry_repair= 0; /* Safety */ diff --git a/storage/myisam/mi_create.c b/storage/myisam/mi_create.c index 4a91c2d939b..46c61eb4709 100644 --- a/storage/myisam/mi_create.c +++ b/storage/myisam/mi_create.c @@ -549,11 +549,6 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs, share.base.pack_bits=packed; share.base.fields=fields; share.base.pack_fields=packed; -#ifdef USE_RAID - share.base.raid_type=ci->raid_type; - share.base.raid_chunks=ci->raid_chunks; - share.base.raid_chunksize=ci->raid_chunksize; -#endif /* max_data_file_length and max_key_file_length are recalculated on open */ if (options & HA_OPTION_TMP_TABLE) @@ -642,20 +637,6 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs, if (!(flags & HA_DONT_TOUCH_DATA)) { -#ifdef USE_RAID - if (share.base.raid_type) - { - (void) fn_format(filename, name, "", MI_NAME_DEXT, - MY_UNPACK_FILENAME | MY_APPEND_EXT); - if ((dfile=my_raid_create(filename, 0, create_mode, - share.base.raid_type, - share.base.raid_chunks, - share.base.raid_chunksize, - MYF(MY_WME | MY_RAID))) < 0) - goto err; - } - else -#endif { if (ci->data_file_name) { @@ -841,7 +822,6 @@ err: (void) mysql_file_close(dfile, MYF(0)); /* fall through */ case 2: - /* QQ: Tõnu should add a call to my_raid_delete() here */ if (! (flags & HA_DONT_TOUCH_DATA)) mysql_file_delete_with_symlink(mi_key_file_dfile, fn_format(filename, name, "", MI_NAME_DEXT, diff --git a/storage/myisam/mi_delete_table.c b/storage/myisam/mi_delete_table.c index 58a60a760aa..a05a2ad6237 100644 --- a/storage/myisam/mi_delete_table.c +++ b/storage/myisam/mi_delete_table.c @@ -22,40 +22,11 @@ int mi_delete_table(const char *name) { char from[FN_REFLEN]; -#ifdef USE_RAID - uint raid_type=0,raid_chunks=0; -#endif DBUG_ENTER("mi_delete_table"); #ifdef EXTRA_DEBUG check_table_is_closed(name,"delete"); #endif -#ifdef USE_RAID - { - MI_INFO *info; - /* - When built with RAID support, we need to determine if this table - makes use of the raid feature. If yes, we need to remove all raid - chunks. This is done with my_raid_delete(). Unfortunately it is - necessary to open the table just to check this. We use - 'open_for_repair' to be able to open even a crashed table. If even - this open fails, we assume no raid configuration for this table - and try to remove the normal data file only. This may however - leave the raid chunks behind. - */ - if (!(info= mi_open(name, O_RDONLY, HA_OPEN_FOR_REPAIR))) - raid_type= 0; - else - { - raid_type= info->s->base.raid_type; - raid_chunks= info->s->base.raid_chunks; - mi_close(info); - } - } -#ifdef EXTRA_DEBUG - check_table_is_closed(name,"delete"); -#endif -#endif /* USE_RAID */ fn_format(from,name,"",MI_NAME_IEXT,MY_UNPACK_FILENAME|MY_APPEND_EXT); if (my_is_symlink(from) && (*myisam_test_invalid_symlink)(from)) @@ -73,10 +44,6 @@ int mi_delete_table(const char *name) DBUG_RETURN(my_errno); } fn_format(from,name,"",MI_NAME_DEXT,MY_UNPACK_FILENAME|MY_APPEND_EXT); -#ifdef USE_RAID - if (raid_type) - DBUG_RETURN(my_raid_delete(from, raid_chunks, MYF(MY_WME)) ? 
my_errno : 0); -#endif if (my_is_symlink(from) && (*myisam_test_invalid_symlink)(from)) { /* diff --git a/storage/myisam/mi_dynrec.c b/storage/myisam/mi_dynrec.c index 59b895b5e64..f429edd2759 100644 --- a/storage/myisam/mi_dynrec.c +++ b/storage/myisam/mi_dynrec.c @@ -283,13 +283,6 @@ int _mi_write_blob_record(MI_INFO *info, const uchar *record) MI_DYN_DELETE_BLOCK_HEADER+1); reclength= (info->s->base.pack_reclength + _my_calc_total_blob_length(info,record)+ extra); -#ifdef NOT_USED /* We now support big rows */ - if (reclength > MI_DYN_MAX_ROW_LENGTH) - { - my_errno=HA_ERR_TO_BIG_ROW; - return -1; - } -#endif if (!(rec_buff=(uchar*) my_alloca(reclength))) { my_errno= HA_ERR_OUT_OF_MEM; /* purecov: inspected */ @@ -317,13 +310,6 @@ int _mi_update_blob_record(MI_INFO *info, my_off_t pos, const uchar *record) MI_DYN_DELETE_BLOCK_HEADER); reclength= (info->s->base.pack_reclength+ _my_calc_total_blob_length(info,record)+ extra); -#ifdef NOT_USED /* We now support big rows */ - if (reclength > MI_DYN_MAX_ROW_LENGTH) - { - my_errno=HA_ERR_TO_BIG_ROW; - return -1; - } -#endif if (!(rec_buff=(uchar*) my_alloca(reclength))) { my_errno= HA_ERR_OUT_OF_MEM; /* purecov: inspected */ @@ -1009,7 +995,7 @@ uint _mi_rec_pack(MI_INFO *info, register uchar *to, char *temp_pos; size_t tmp_length=length-portable_sizeof_char_ptr; memcpy((uchar*) to,from,tmp_length); - memcpy_fixed(&temp_pos,from+tmp_length,sizeof(char*)); + memcpy(&temp_pos,from+tmp_length,sizeof(char*)); memcpy(to+tmp_length,temp_pos,(size_t) blob->length); to+=tmp_length+blob->length; } @@ -1324,9 +1310,9 @@ ulong _mi_rec_unpack(register MI_INFO *info, register uchar *to, uchar *from, from_left - size_length < blob_length || from_left - size_length - blob_length < min_pack_length) goto err; - memcpy((uchar*) to,(uchar*) from,(size_t) size_length); + memcpy(to, from, (size_t) size_length); from+=size_length; - memcpy_fixed((uchar*) to+size_length,(uchar*) &from,sizeof(char*)); + memcpy(to+size_length, &from, sizeof(char*)); from+=blob_length; } else diff --git a/storage/myisam/mi_key.c b/storage/myisam/mi_key.c index 3f445ebf44d..bce42b64e99 100644 --- a/storage/myisam/mi_key.c +++ b/storage/myisam/mi_key.c @@ -139,7 +139,7 @@ uint _mi_make_key(register MI_INFO *info, uint keynr, uchar *key, else if (keyseg->flag & HA_BLOB_PART) { uint tmp_length=_mi_calc_blob_length(keyseg->bit_start,pos); - memcpy_fixed((uchar*) &pos,pos+keyseg->bit_start,sizeof(char*)); + memcpy(&pos,pos+keyseg->bit_start,sizeof(char*)); set_if_smaller(length,tmp_length); FIX_LENGTH(cs, pos, length, char_length); store_key_length_inc(key,char_length); diff --git a/storage/myisam/mi_log.c b/storage/myisam/mi_log.c index f6bbaab1f87..5af4a057a95 100644 --- a/storage/myisam/mi_log.c +++ b/storage/myisam/mi_log.c @@ -149,7 +149,7 @@ void _myisam_log_record(enum myisam_log_commands command, MI_INFO *info, blob != end ; blob++) { - memcpy_fixed((uchar*) &pos, record+blob->offset+blob->pack_length, + memcpy(&pos, record+blob->offset+blob->pack_length, sizeof(char*)); (void) mysql_file_write(myisam_log_file, pos, blob->length, MYF(0)); } diff --git a/storage/myisam/mi_open.c b/storage/myisam/mi_open.c index 5b3da9841b8..e3c29909067 100644 --- a/storage/myisam/mi_open.c +++ b/storage/myisam/mi_open.c @@ -260,25 +260,6 @@ MI_INFO *mi_open(const char *name, int mode, uint open_flags) set_if_smaller(max_data_file_length, INT_MAX32); set_if_smaller(max_key_file_length, INT_MAX32); #endif -#if USE_RAID && SYSTEM_SIZEOF_OFF_T == 4 - set_if_smaller(max_key_file_length, INT_MAX32); - if 
(!share->base.raid_type) - { - set_if_smaller(max_data_file_length, INT_MAX32); - } - else - { - set_if_smaller(max_data_file_length, - (ulonglong) share->base.raid_chunks << 31); - } -#elif !defined(USE_RAID) - if (share->base.raid_type) - { - DBUG_PRINT("error",("Table uses RAID but we don't have RAID support")); - my_errno=HA_ERR_UNSUPPORTED; - goto err; - } -#endif share->base.max_data_file_length=(my_off_t) max_data_file_length; share->base.max_key_file_length=(my_off_t) max_key_file_length; @@ -877,7 +858,7 @@ uint mi_state_info_write(File file, MI_STATE_INFO *state, uint pWrite) key_blocks=state->header.max_block_size_index; DBUG_ENTER("mi_state_info_write"); - memcpy_fixed(ptr,&state->header,sizeof(state->header)); + memcpy(ptr, &state->header, sizeof(state->header)); ptr+=sizeof(state->header); /* open_count must be first because of _mi_mark_file_changed ! */ @@ -936,7 +917,7 @@ uint mi_state_info_write(File file, MI_STATE_INFO *state, uint pWrite) uchar *mi_state_info_read(uchar *ptr, MI_STATE_INFO *state) { uint i,keys,key_parts,key_blocks; - memcpy_fixed(&state->header,ptr, sizeof(state->header)); + memcpy(&state->header, ptr, sizeof(state->header)); ptr +=sizeof(state->header); keys=(uint) state->header.keys; key_parts=mi_uint2korr(state->header.key_parts); @@ -1036,10 +1017,7 @@ uint mi_base_info_write(File file, MI_BASE_INFO *base) mi_int2store(ptr,base->max_key_length); ptr +=2; mi_int2store(ptr,base->extra_alloc_bytes); ptr +=2; *ptr++= base->extra_alloc_procent; - *ptr++= base->raid_type; - mi_int2store(ptr,base->raid_chunks); ptr +=2; - mi_int4store(ptr,base->raid_chunksize); ptr +=4; - bzero(ptr,6); ptr +=6; /* extra */ + bzero(ptr,13); ptr +=13; /* extra */ return mysql_file_write(file, buff, (size_t) (ptr-buff), MYF(MY_NABP)) != 0; } @@ -1070,17 +1048,8 @@ uchar *my_n_base_info_read(uchar *ptr, MI_BASE_INFO *base) base->max_key_length = mi_uint2korr(ptr); ptr +=2; base->extra_alloc_bytes = mi_uint2korr(ptr); ptr +=2; base->extra_alloc_procent = *ptr++; - base->raid_type= *ptr++; - base->raid_chunks= mi_uint2korr(ptr); ptr +=2; - base->raid_chunksize= mi_uint4korr(ptr); ptr +=4; - /* TO BE REMOVED: Fix for old RAID files */ - if (base->raid_type == 0) - { - base->raid_chunks=0; - base->raid_chunksize=0; - } - ptr+=6; + ptr+=13; return ptr; } @@ -1223,7 +1192,7 @@ uchar *mi_recinfo_read(uchar *ptr, MI_COLUMNDEF *recinfo) } /************************************************************************** -Open data file with or without RAID +Open data file. We can't use dup() here as the data file descriptors need to have different active seek-positions. @@ -1251,20 +1220,8 @@ int mi_open_datafile(MI_INFO *info, MYISAM_SHARE *share, const char *org_name, data_name= real_data_name; } } -#ifdef USE_RAID - if (share->base.raid_type) - { - info->dfile=my_raid_open(data_name, - share->mode | O_SHARE, - share->base.raid_type, - share->base.raid_chunks, - share->base.raid_chunksize, - MYF(MY_WME | MY_RAID)); - } - else -#endif - info->dfile= mysql_file_open(mi_key_file_dfile, - data_name, share->mode | O_SHARE, MYF(MY_WME)); + info->dfile= mysql_file_open(mi_key_file_dfile, + data_name, share->mode | O_SHARE, MYF(MY_WME)); return info->dfile >= 0 ? 
0 : 1; } diff --git a/storage/myisam/mi_packrec.c b/storage/myisam/mi_packrec.c index 0ba495fdd68..d8d892a5bc9 100644 --- a/storage/myisam/mi_packrec.c +++ b/storage/myisam/mi_packrec.c @@ -1051,8 +1051,7 @@ static void uf_blob(MI_COLUMNDEF *rec, MI_BIT_BUFF *bit_buff, } decode_bytes(rec,bit_buff,bit_buff->blob_pos,bit_buff->blob_pos+length); _my_store_blob_length((uchar*) to,pack_length,length); - memcpy_fixed((char*) to+pack_length,(char*) &bit_buff->blob_pos, - sizeof(char*)); + memcpy((char*) to+pack_length, &bit_buff->blob_pos, sizeof(char*)); bit_buff->blob_pos+=length; } } diff --git a/storage/myisam/mi_rename.c b/storage/myisam/mi_rename.c index 56ccb333d03..455d45cecfe 100644 --- a/storage/myisam/mi_rename.c +++ b/storage/myisam/mi_rename.c @@ -22,28 +22,12 @@ int mi_rename(const char *old_name, const char *new_name) { char from[FN_REFLEN],to[FN_REFLEN]; -#ifdef USE_RAID - uint raid_type=0,raid_chunks=0; -#endif DBUG_ENTER("mi_rename"); #ifdef EXTRA_DEBUG check_table_is_closed(old_name,"rename old_table"); check_table_is_closed(new_name,"rename new table2"); #endif -#ifdef USE_RAID - { - MI_INFO *info; - if (!(info=mi_open(old_name, O_RDONLY, 0))) - DBUG_RETURN(my_errno); - raid_type = info->s->base.raid_type; - raid_chunks = info->s->base.raid_chunks; - mi_close(info); - } -#ifdef EXTRA_DEBUG - check_table_is_closed(old_name,"rename raidcheck"); -#endif -#endif /* USE_RAID */ fn_format(from,old_name,"",MI_NAME_IEXT,MY_UNPACK_FILENAME|MY_APPEND_EXT); fn_format(to,new_name,"",MI_NAME_IEXT,MY_UNPACK_FILENAME|MY_APPEND_EXT); @@ -51,11 +35,6 @@ int mi_rename(const char *old_name, const char *new_name) DBUG_RETURN(my_errno); fn_format(from,old_name,"",MI_NAME_DEXT,MY_UNPACK_FILENAME|MY_APPEND_EXT); fn_format(to,new_name,"",MI_NAME_DEXT,MY_UNPACK_FILENAME|MY_APPEND_EXT); -#ifdef USE_RAID - if (raid_type) - DBUG_RETURN(my_raid_rename(from, to, raid_chunks, MYF(MY_WME)) ? my_errno : - 0); -#endif DBUG_RETURN(mysql_file_rename_with_symlink(mi_key_file_dfile, from, to, MYF(MY_WME)) ? 
my_errno : 0); diff --git a/storage/myisam/mi_static.c b/storage/myisam/mi_static.c index baa01a507eb..073b127f1a3 100644 --- a/storage/myisam/mi_static.c +++ b/storage/myisam/mi_static.c @@ -38,7 +38,6 @@ uint myisam_concurrent_insert= 2; uint myisam_concurrent_insert= 0; #endif ulonglong myisam_max_temp_length= MAX_FILE_SIZE; -ulong myisam_bulk_insert_tree_size=8192*1024; ulong myisam_data_pointer_size=4; ulonglong myisam_mmap_size= SIZE_T_MAX, myisam_mmap_used= 0; diff --git a/storage/myisam/mi_test1.c b/storage/myisam/mi_test1.c index 742864fe241..f89f2a8d21d 100644 --- a/storage/myisam/mi_test1.c +++ b/storage/myisam/mi_test1.c @@ -411,7 +411,7 @@ static void create_record(uchar *record,uint rownr) tmp=strlen((char*) blob_key); int4store(pos,tmp); ptr=blob_key; - memcpy_fixed(pos+4,&ptr,sizeof(char*)); + memcpy(pos+4, &ptr, sizeof(char*)); pos+=recinfo[1].length; } else if (recinfo[1].type == FIELD_VARCHAR) @@ -439,7 +439,7 @@ static void create_record(uchar *record,uint rownr) tmp=strlen((char*) blob_record); int4store(pos,tmp); ptr=blob_record; - memcpy_fixed(pos+4,&ptr,sizeof(char*)); + memcpy(pos+4, &ptr, sizeof(char*)); } else if (recinfo[2].type == FIELD_VARCHAR) { @@ -468,10 +468,10 @@ static void update_record(uchar *record) uchar *column,*ptr; int length; length=uint4korr(pos); /* Long blob */ - memcpy_fixed(&column,pos+4,sizeof(char*)); + memcpy(&column, pos+4, sizeof(char*)); memcpy(blob_key,column,length); /* Move old key */ ptr=blob_key; - memcpy_fixed(pos+4,&ptr,sizeof(char*)); /* Store pointer to new key */ + memcpy(pos+4, &ptr, sizeof(char*)); /* Store pointer to new key */ if (keyinfo[0].seg[0].type != HA_KEYTYPE_NUM) default_charset_info->cset->casedn(default_charset_info, (char*) blob_key, length, @@ -501,13 +501,13 @@ static void update_record(uchar *record) uchar *column; int length; length=uint4korr(pos); - memcpy_fixed(&column,pos+4,sizeof(char*)); + memcpy(&column, pos+4, sizeof(char*)); memcpy(blob_record,column,length); bfill(blob_record+length,20,'.'); /* Make it larger */ length+=20; int4store(pos,length); column= blob_record; - memcpy_fixed(pos+4,&column,sizeof(char*)); + memcpy(pos+4, &column, sizeof(char*)); } else if (recinfo[2].type == FIELD_VARCHAR) { diff --git a/storage/myisam/mi_test2.c b/storage/myisam/mi_test2.c index 513b390ee68..127d93b5433 100644 --- a/storage/myisam/mi_test2.c +++ b/storage/myisam/mi_test2.c @@ -1030,7 +1030,7 @@ static void put_blob_in_record(uchar *blob_pos, char **blob_buffer) for (i=0 ; i < length ; i++) (*blob_buffer)[i]=(char) (length+i); int4store(blob_pos,length); - memcpy_fixed(blob_pos+4,(char*) blob_buffer,sizeof(char*)); + memcpy(blob_pos+4, blob_buffer, sizeof(char*)); } else { diff --git a/storage/myisam/mi_test3.c b/storage/myisam/mi_test3.c index bf36d8df7f4..c03a34df227 100644 --- a/storage/myisam/mi_test3.c +++ b/storage/myisam/mi_test3.c @@ -458,7 +458,7 @@ int test_update(MI_INFO *file,int id,int lock_type) } } } - memcpy_fixed(new_record.id,record.id,sizeof(record.id)); + memcpy(new_record.id, record.id, sizeof(record.id)); tmp=rnd(20000)+40000; int4store(new_record.nr,tmp); if (!mi_update(file,record.id,new_record.id)) diff --git a/storage/myisam/mi_unique.c b/storage/myisam/mi_unique.c index fdba84a2e67..cee159951de 100644 --- a/storage/myisam/mi_unique.c +++ b/storage/myisam/mi_unique.c @@ -111,7 +111,7 @@ ha_checksum mi_unique_hash(MI_UNIQUEDEF *def, const uchar *record) else if (keyseg->flag & HA_BLOB_PART) { uint tmp_length=_mi_calc_blob_length(keyseg->bit_start,pos); - memcpy_fixed((uchar*) 
&pos,pos+keyseg->bit_start,sizeof(char*)); + memcpy(&pos, pos+keyseg->bit_start, sizeof(char*)); if (!length || length > tmp_length) length=tmp_length; /* The whole blob */ } @@ -206,8 +206,8 @@ int mi_unique_comp(MI_UNIQUEDEF *def, const uchar *a, const uchar *b, set_if_smaller(a_length, keyseg->length); set_if_smaller(b_length, keyseg->length); } - memcpy_fixed((uchar*) &pos_a,pos_a+keyseg->bit_start,sizeof(char*)); - memcpy_fixed((uchar*) &pos_b,pos_b+keyseg->bit_start,sizeof(char*)); + memcpy(&pos_a, pos_a+keyseg->bit_start, sizeof(char*)); + memcpy(&pos_b, pos_b+keyseg->bit_start, sizeof(char*)); } if (type == HA_KEYTYPE_TEXT || type == HA_KEYTYPE_VARTEXT1 || type == HA_KEYTYPE_VARTEXT2) diff --git a/storage/myisam/mi_write.c b/storage/myisam/mi_write.c index f2d43585eef..bd56bb04f65 100644 --- a/storage/myisam/mi_write.c +++ b/storage/myisam/mi_write.c @@ -61,11 +61,6 @@ int mi_write(MI_INFO *info, uchar *record) if (_mi_readinfo(info,F_WRLCK,1)) DBUG_RETURN(my_errno); dont_break(); /* Dont allow SIGHUP or SIGINT */ -#if !defined(NO_LOCKING) && defined(USE_RECORD_LOCK) - if (!info->locked && my_lock(info->dfile,F_WRLCK,0L,F_TO_EOF, - MYF(MY_SEEK_NOT_DONE) | info->lock_wait)) - goto err; -#endif filepos= ((share->state.dellink != HA_OFFSET_ERROR && !info->append_insert_at_end) ? share->state.dellink : diff --git a/storage/myisam/myisam_ftdump.c b/storage/myisam/myisam_ftdump.c index 4718abc3481..1c534fe8d02 100644 --- a/storage/myisam/myisam_ftdump.c +++ b/storage/myisam/myisam_ftdump.c @@ -46,7 +46,7 @@ static struct my_option my_long_options[] = {"stats", 's', "Report global stats.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"verbose", 'v', "Be verbose.", - (uchar**) &verbose, (uchar**) &verbose, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, + &verbose, &verbose, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} }; diff --git a/storage/myisam/myisamchk.c b/storage/myisam/myisamchk.c index e1cedf6bc31..4df76e31872 100644 --- a/storage/myisam/myisamchk.c +++ b/storage/myisam/myisamchk.c @@ -27,12 +27,6 @@ #ifdef HAVE_SYS_MMAN_H #include <sys/mman.h> #endif -SET_STACK_SIZE(9000) /* Minimum stack size for program */ - -#ifndef USE_RAID -#define my_raid_create(A,B,C,D,E,F,G) my_create(A,B,C,G) -#define my_raid_delete(A,B,C) my_delete(A,B) -#endif static uint decode_bits; static char **default_argv; @@ -782,7 +776,6 @@ static int myisamchk(MI_CHECK *param, char * filename) { int error,lock_type,recreate; int rep_quick= param->testflag & (T_QUICK | T_FORCE_UNIQUENESS); - uint raid_chunks; MI_INFO *info; File datafile; char llbuff[22],llbuff2[22]; @@ -844,7 +837,6 @@ static int myisamchk(MI_CHECK *param, char * filename) share->options&= ~HA_OPTION_READ_ONLY_DATA; /* We are modifing it */ share->tot_locks-= share->r_locks; share->r_locks=0; - raid_chunks=share->base.raid_chunks; /* Skip the checking of the file if: @@ -1013,9 +1005,7 @@ static int myisamchk(MI_CHECK *param, char * filename) if (param->out_flag & O_NEW_DATA) { /* Change temp file to org file */ (void) my_close(info->dfile,MYF(MY_WME)); /* Close new file */ - error|=change_to_newfile(filename,MI_NAME_DEXT,DATA_TMP_EXT, - raid_chunks, - MYF(0)); + error|=change_to_newfile(filename, MI_NAME_DEXT, DATA_TMP_EXT, MYF(0)); if (mi_open_datafile(info,info->s, NULL, -1)) error=1; param->out_flag&= ~O_NEW_DATA; /* We are using new datafile */ @@ -1146,12 +1136,10 @@ end2: { if (param->out_flag & O_NEW_DATA) error|=change_to_newfile(filename,MI_NAME_DEXT,DATA_TMP_EXT, - raid_chunks, 
((param->testflag & T_BACKUP_DATA) ? MYF(MY_REDEL_MAKE_BACKUP) : MYF(0))); if (param->out_flag & O_NEW_INDEX) - error|=change_to_newfile(filename,MI_NAME_IEXT,INDEX_TMP_EXT,0, - MYF(0)); + error|=change_to_newfile(filename, MI_NAME_IEXT, INDEX_TMP_EXT, MYF(0)); } (void) fflush(stdout); (void) fflush(stderr); if (param->error_printed) @@ -1247,16 +1235,9 @@ static void descript(MI_CHECK *param, register MI_INFO *info, char * name) share->base.auto_key, llstr(share->state.auto_increment,llbuff)); } - if (share->base.raid_type) - { - printf("RAID: Type: %u Chunks: %u Chunksize: %lu\n", - share->base.raid_type, - share->base.raid_chunks, - share->base.raid_chunksize); - } if (share->options & (HA_OPTION_CHECKSUM | HA_OPTION_COMPRESS_RECORD)) printf("Checksum: %23s\n",llstr(info->state->checksum,llbuff)); -; + if (share->options & HA_OPTION_DELAY_KEY_WRITE) printf("Keys are only flushed at close\n"); @@ -1527,14 +1508,11 @@ static int mi_sort_records(MI_CHECK *param, goto err; } fn_format(param->temp_filename,name,"", MI_NAME_DEXT,2+4+32); - new_file=my_raid_create(fn_format(param->temp_filename, - param->temp_filename,"", - DATA_TMP_EXT,2+4), - 0,param->tmpfile_createflag, - share->base.raid_type, - share->base.raid_chunks, - share->base.raid_chunksize, - MYF(0)); + new_file= my_create(fn_format(param->temp_filename, + param->temp_filename, "", + DATA_TMP_EXT, 2+4), + 0, param->tmpfile_createflag, + MYF(0)); if (new_file < 0) { mi_check_print_error(param,"Can't create new tempfile: '%s'", @@ -1609,8 +1587,7 @@ err: { (void) end_io_cache(&info->rec_cache); (void) my_close(new_file,MYF(MY_WME)); - (void) my_raid_delete(param->temp_filename, share->base.raid_chunks, - MYF(MY_WME)); + (void) my_delete(param->temp_filename, MYF(MY_WME)); } if (temp_buff) { diff --git a/storage/myisam/myisamdef.h b/storage/myisam/myisamdef.h index 130a96bc9e0..c7f0cb27a40 100644 --- a/storage/myisam/myisamdef.h +++ b/storage/myisam/myisamdef.h @@ -26,8 +26,9 @@ #endif #include <mysql/psi/mysql_file.h> -#if defined(my_write) && !defined(MAP_TO_USE_RAID) -#undef my_write /* undef map from my_nosys; We need test-if-disk full */ +/* undef map from my_nosys; We need test-if-disk full */ +#if defined(my_write) +#undef my_write #endif typedef struct st_mi_status_info @@ -130,9 +131,6 @@ typedef struct st_mi_base_info /* Extra allocation when using dynamic record format */ uint extra_alloc_bytes; uint extra_alloc_procent; - /* Info about raid */ - uint raid_type,raid_chunks; - ulong raid_chunksize; /* The following are from the header */ uint key_parts,all_key_parts; } MI_BASE_INFO; @@ -544,10 +542,6 @@ void _mi_store_static_key(MI_KEYDEF *keyinfo, uchar *key_pos, MI_KEY_PARAM *s_temp); void _mi_store_var_pack_key(MI_KEYDEF *keyinfo, uchar *key_pos, MI_KEY_PARAM *s_temp); -#ifdef NOT_USED -void _mi_store_pack_key(MI_KEYDEF *keyinfo, uchar *key_pos, - MI_KEY_PARAM *s_temp); -#endif void _mi_store_bin_pack_key(MI_KEYDEF *keyinfo, uchar *key_pos, MI_KEY_PARAM *s_temp); diff --git a/storage/myisam/myisamlog.c b/storage/myisam/myisamlog.c index d3da0eab22c..84743b8da51 100644 --- a/storage/myisam/myisamlog.c +++ b/storage/myisam/myisamlog.c @@ -619,7 +619,7 @@ static int examine_log(char * file_name, char **table_names) case MI_LOG_LOCK: if (my_b_read(&cache,(uchar*) head,sizeof(lock_command))) goto err; - memcpy_fixed(&lock_command,head,sizeof(lock_command)); + memcpy(&lock_command, head, sizeof(lock_command)); if (verbose && !record_pos_file && (!table_names[0] || (curr_file_info && curr_file_info->used))) printf_log("%s: 
%s(%d) -> %d\n",FILENAME(curr_file_info), @@ -728,7 +728,7 @@ static void fix_blob_pointers(MI_INFO *info, uchar *record) blob != end ; blob++) { - memcpy_fixed(record+blob->offset+blob->pack_length,&pos,sizeof(char*)); + memcpy(record+blob->offset+blob->pack_length, &pos, sizeof(char*)); pos+=_mi_calc_blob_length(blob->pack_length,record+blob->offset); } } diff --git a/storage/myisam/myisampack.c b/storage/myisam/myisampack.c index 4cd305fdc69..84a7f2a1ba9 100644 --- a/storage/myisam/myisampack.c +++ b/storage/myisam/myisampack.c @@ -1040,7 +1040,7 @@ static int get_statistic(PACK_MRG_INFO *mrg,HUFF_COUNTS *huff_counts) { uint field_length=count->field_length -portable_sizeof_char_ptr; ulong blob_length= _mi_calc_blob_length(field_length, start_pos); - memcpy_fixed((char*) &pos, start_pos+field_length,sizeof(char*)); + memcpy(&pos, start_pos+field_length, sizeof(char*)); end_pos=pos+blob_length; tot_blob_length+=blob_length; set_if_bigger(count->max_length,blob_length); @@ -1889,7 +1889,7 @@ static uint join_same_trees(HUFF_COUNTS *huff_counts, uint trees) i->tree->tree_pack_length+j->tree->tree_pack_length+ ALLOWED_JOIN_DIFF) { - memcpy_fixed((uchar*) i->counts,(uchar*) count.counts, + memcpy(i->counts, count.counts, sizeof(count.counts[0])*256); my_free(j->tree->element_buffer); j->tree->element_buffer=0; @@ -2040,7 +2040,7 @@ static int write_header(PACK_MRG_INFO *mrg,uint head_length,uint trees, uchar *buff= (uchar*) file_buffer.pos; bzero(buff,HEAD_LENGTH); - memcpy_fixed(buff,myisam_pack_file_magic,4); + memcpy(buff,myisam_pack_file_magic,4); int4store(buff+4,head_length); int4store(buff+8, mrg->min_pack_length); int4store(buff+12,mrg->max_pack_length); @@ -2697,8 +2697,7 @@ static int compress_isam_file(PACK_MRG_INFO *mrg, HUFF_COUNTS *huff_counts) DBUG_PRINT("fields", ("FIELD_BLOB %lu bytes, bits: %2u", blob_length, count->length_bits)); write_bits(blob_length,count->length_bits); - memcpy_fixed(&blob,end_pos-portable_sizeof_char_ptr, - sizeof(char*)); + memcpy(&blob, end_pos-portable_sizeof_char_ptr, sizeof(char*)); blob_end=blob+blob_length; /* Encode the blob bytes. 
*/ for ( ; blob < blob_end ; blob++) diff --git a/storage/myisam/rt_test.c b/storage/myisam/rt_test.c index 4a9b61605d9..7233300c539 100644 --- a/storage/myisam/rt_test.c +++ b/storage/myisam/rt_test.c @@ -366,25 +366,6 @@ static int read_with_pos (MI_INFO * file,int silent) } -#ifdef NOT_USED -static void bprint_record(char * record, - my_off_t offs __attribute__((unused)), - const char * tail) -{ - int i; - char * pos; - i=(unsigned char)record[0]; - printf("%02X ",i); - - for( pos=record+1, i=0; i<32; i++,pos++){ - int b=(unsigned char)*pos; - printf("%02X",b); - } - printf("%s",tail); -} -#endif - - static void print_record(uchar * record, my_off_t offs __attribute__((unused)), const char * tail) @@ -424,30 +405,6 @@ static void create_record1(uchar *record,uint rownr) } } -#ifdef NOT_USED - -static void create_record0(uchar *record,uint rownr) -{ - int i; - char * pos; - double c=rownr+10; - double c0=0; - - bzero((char*) record,MAX_REC_LENGTH); - record[0]=0x01; /* DEL marker */ - - for ( pos=record+1, i=0; i<ndims; i++) - { - memcpy(pos,&c0,sizeof(c0)); - float8store(pos,c0); - pos+=sizeof(c0); - memcpy(pos,&c,sizeof(c)); - float8store(pos,c); - pos+=sizeof(c); - } -} - -#endif static void create_record(uchar *record,uint rownr) { diff --git a/storage/myisam/sp_key.c b/storage/myisam/sp_key.c index 3748a38ff81..bde0e1cb388 100644 --- a/storage/myisam/sp_key.c +++ b/storage/myisam/sp_key.c @@ -47,7 +47,7 @@ uint sp_make_key(register MI_INFO *info, uint keynr, uchar *key, pos = (uchar*)record + keyseg->start; dlen = _mi_calc_blob_length(keyseg->bit_start, pos); - memcpy_fixed(&dptr, pos + keyseg->bit_start, sizeof(char*)); + memcpy(&dptr, pos + keyseg->bit_start, sizeof(char*)); if (!dptr) { my_errno= HA_ERR_NULL_IN_SPATIAL; diff --git a/storage/myisam/sp_test.c b/storage/myisam/sp_test.c index 069f43c320d..d86fdc03908 100644 --- a/storage/myisam/sp_test.c +++ b/storage/myisam/sp_test.c @@ -299,26 +299,6 @@ static int read_with_pos (MI_INFO * file,int silent) } -#ifdef NOT_USED -static void bprint_record(uchar * record, - my_off_t offs __attribute__((unused)), - const char * tail) -{ - int i; - char * pos; - i=(unsigned char)record[0]; - printf("%02X ",i); - - for( pos=record+1, i=0; i<32; i++,pos++) - { - int b=(unsigned char)*pos; - printf("%02X",b); - } - printf("%s",tail); -} -#endif - - static void print_record(uchar * record, my_off_t offs,const char * tail) { uchar *pos; @@ -330,7 +310,7 @@ static void print_record(uchar * record, my_off_t offs,const char * tail) len=sint4korr(pos); pos+=4; printf(" len=%d ",len); - memcpy_fixed(&ptr,pos,sizeof(char*)); + memcpy(&ptr, pos, sizeof(char*)); if (ptr) rtree_PrintWKB((uchar*) ptr,SPDIMS); else @@ -340,34 +320,6 @@ static void print_record(uchar * record, my_off_t offs,const char * tail) } -#ifdef NOT_USED -static void create_point(uchar *record,uint rownr) -{ - uint tmp; - char *ptr; - char *pos=record; - double x[200]; - int i; - - for(i=0;i<SPDIMS;i++) - x[i]=rownr; - - bzero((char*) record,MAX_REC_LENGTH); - *pos=0x01; /* DEL marker */ - pos++; - - memset(blob_key,0,sizeof(blob_key)); - tmp=rtree_CreatePointWKB(x,SPDIMS,blob_key); - - int4store(pos,tmp); - pos+=4; - - ptr=blob_key; - memcpy_fixed(pos,&ptr,sizeof(char*)); -} -#endif - - static void create_linestring(uchar *record,uint rownr) { uint tmp; @@ -376,23 +328,23 @@ static void create_linestring(uchar *record,uint rownr) double x[200]; int i,j; int npoints=2; - + for(j=0;j<npoints;j++) for(i=0;i<SPDIMS;i++) x[i+j*SPDIMS]=rownr*j; - + bzero((char*) record,MAX_REC_LENGTH); 
*pos=0x01; /* DEL marker */ pos++; - + memset(blob_key,0,sizeof(blob_key)); tmp=rtree_CreateLineStringWKB(x,SPDIMS,npoints, (uchar*) blob_key); - + int4store(pos,tmp); pos+=4; - + ptr=blob_key; - memcpy_fixed(pos,&ptr,sizeof(char*)); + memcpy(pos, &ptr, sizeof(char*)); } @@ -401,7 +353,7 @@ static void create_key(uchar *key,uint rownr) double c=rownr; uchar *pos; uint i; - + bzero(key,MAX_REC_LENGTH); for (pos=key, i=0; i<2*SPDIMS; i++) { @@ -426,27 +378,6 @@ static void print_key(const uchar *key,const char * tail) } -#ifdef NOT_USED - -static int rtree_CreatePointWKB(double *ords, uint n_dims, uchar *wkb) -{ - uint i; - - *wkb = wkbXDR; - ++wkb; - int4store(wkb, wkbPoint); - wkb += 4; - - for (i=0; i < n_dims; ++i) - { - float8store(wkb, ords[i]); - wkb += 8; - } - return 5 + n_dims * 8; -} -#endif - - static int rtree_CreateLineStringWKB(double *ords, uint n_dims, uint n_points, uchar *wkb) { diff --git a/storage/myisammrg/ha_myisammrg.cc b/storage/myisammrg/ha_myisammrg.cc index f41629ff882..f62aff4e383 100644 --- a/storage/myisammrg/ha_myisammrg.cc +++ b/storage/myisammrg/ha_myisammrg.cc @@ -642,7 +642,7 @@ extern "C" MI_INFO *myisammrg_attach_children_callback(void *callback_param) my_errno= HA_ERR_WRONG_MRG_TABLE_DEF; } DBUG_PRINT("myrg", ("MyISAM handle: 0x%lx my_errno: %d", - my_errno ? NULL : (long) myisam, my_errno)); + my_errno ? 0L : (long) myisam, my_errno)); end: DBUG_RETURN(myisam); diff --git a/storage/ndb/include/kernel/signaldata/FsOpenReq.hpp b/storage/ndb/include/kernel/signaldata/FsOpenReq.hpp index 8126267f946..95dbf5204f1 100644 --- a/storage/ndb/include/kernel/signaldata/FsOpenReq.hpp +++ b/storage/ndb/include/kernel/signaldata/FsOpenReq.hpp @@ -203,7 +203,7 @@ Uint32 FsOpenReq::getVersion(const Uint32 fileNumber[]){ inline void FsOpenReq::setVersion(Uint32 fileNumber[], Uint8 val){ const Uint32 t = fileNumber[3]; - fileNumber[3] = t & 0x00FFFFFF | (((Uint32)val) << 24); + fileNumber[3] = (t & 0x00FFFFFF) | (((Uint32)val) << 24); } inline @@ -214,7 +214,7 @@ Uint32 FsOpenReq::getSuffix(const Uint32 fileNumber[]){ inline void FsOpenReq::setSuffix(Uint32 fileNumber[], Uint8 val){ const Uint32 t = fileNumber[3]; - fileNumber[3] = t & 0xFF00FFFF | (((Uint32)val) << 16); + fileNumber[3] = (t & 0xFF00FFFF) | (((Uint32)val) << 16); } inline @@ -225,7 +225,7 @@ Uint32 FsOpenReq::v1_getDisk(const Uint32 fileNumber[]){ inline void FsOpenReq::v1_setDisk(Uint32 fileNumber[], Uint8 val){ const Uint32 t = fileNumber[3]; - fileNumber[3] = t & 0xFFFF00FF | (((Uint32)val) << 8); + fileNumber[3] = (t & 0xFFFF00FF) | (((Uint32)val) << 8); } inline @@ -266,7 +266,7 @@ Uint32 FsOpenReq::v1_getP(const Uint32 fileNumber[]){ inline void FsOpenReq::v1_setP(Uint32 fileNumber[], Uint8 val){ const Uint32 t = fileNumber[3]; - fileNumber[3] = t & 0xFFFFFF00 | val; + fileNumber[3] = (t & 0xFFFFFF00) | val; } /****************/ diff --git a/storage/ndb/include/util/ndb_opts.h b/storage/ndb/include/util/ndb_opts.h index f18bb9646cc..59d9eaf4d33 100644 --- a/storage/ndb/include/util/ndb_opts.h +++ b/storage/ndb/include/util/ndb_opts.h @@ -58,40 +58,40 @@ const char *opt_debug= 0; "Set connect string for connecting to ndb_mgmd. " \ "Syntax: \"[nodeid=<id>;][host=]<hostname>[:<port>]\". 
" \ "Overrides specifying entries in NDB_CONNECTSTRING and my.cnf", \ - (uchar**) &opt_ndb_connectstring, (uchar**) &opt_ndb_connectstring, \ + &opt_ndb_connectstring, &opt_ndb_connectstring, \ 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },\ { "ndb-mgmd-host", OPT_NDB_MGMD, \ "Set host and port for connecting to ndb_mgmd. " \ "Syntax: <hostname>[:<port>].", \ - (uchar**) &opt_ndb_mgmd, (uchar**) &opt_ndb_mgmd, 0, \ + &opt_ndb_mgmd, &opt_ndb_mgmd, 0, \ GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },\ { "ndb-nodeid", OPT_NDB_NODEID, \ "Set node id for this node.", \ - (uchar**) &opt_ndb_nodeid, (uchar**) &opt_ndb_nodeid, 0, \ + &opt_ndb_nodeid, &opt_ndb_nodeid, 0, \ GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },\ { "ndb-shm", OPT_NDB_SHM,\ "Allow optimizing using shared memory connections when available",\ - (uchar**) &opt_ndb_shm, (uchar**) &opt_ndb_shm, 0,\ + &opt_ndb_shm, &opt_ndb_shm, 0,\ GET_BOOL, NO_ARG, OPT_NDB_SHM_DEFAULT, 0, 0, 0, 0, 0 },\ {"ndb-optimized-node-selection", OPT_NDB_OPTIMIZED_NODE_SELECTION,\ "Select nodes for transactions in a more optimal way",\ - (uchar**) &opt_ndb_optimized_node_selection,\ - (uchar**) &opt_ndb_optimized_node_selection, 0,\ + &opt_ndb_optimized_node_selection,\ + &opt_ndb_optimized_node_selection, 0,\ GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0},\ { "connect-string", OPT_NDB_CONNECTSTRING, "same as --ndb-connectstring",\ - (uchar**) &opt_ndb_connectstring, (uchar**) &opt_ndb_connectstring, \ + &opt_ndb_connectstring, &opt_ndb_connectstring, \ 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },\ { "core-file", OPT_WANT_CORE, "Write core on errors.",\ - (uchar**) &opt_core, (uchar**) &opt_core, 0,\ + &opt_core, &opt_core, 0,\ GET_BOOL, NO_ARG, OPT_WANT_CORE_DEFAULT, 0, 0, 0, 0, 0},\ {"character-sets-dir", OPT_CHARSETS_DIR,\ - "Directory where character sets are.", (uchar**) &charsets_dir,\ - (uchar**) &charsets_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}\ + "Directory where character sets are.", &charsets_dir,\ + &charsets_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}\ #ifndef DBUG_OFF #define NDB_STD_OPTS(prog_name) \ { "debug", '#', "Output debug log. 
Often this is 'd:t:o,filename'.", \ - (uchar**) &opt_debug, (uchar**) &opt_debug, \ + &opt_debug, &opt_debug, \ 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0 }, \ NDB_STD_OPTS_COMMON #else diff --git a/storage/ndb/src/cw/cpcd/main.cpp b/storage/ndb/src/cw/cpcd/main.cpp index d5c31d610cb..b750c00bc2a 100644 --- a/storage/ndb/src/cw/cpcd/main.cpp +++ b/storage/ndb/src/cw/cpcd/main.cpp @@ -39,22 +39,22 @@ static const char *user = 0; static struct my_option my_long_options[] = { { "work-dir", 'w', "Work directory", - (uchar**) &work_dir, (uchar**) &work_dir, 0, + &work_dir, &work_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { "port", 'p', "TCP port to listen on", - (uchar**) &port, (uchar**) &port, 0, + &port, &port, 0, GET_INT, REQUIRED_ARG, CPCD_DEFAULT_TCP_PORT, 0, 0, 0, 0, 0 }, { "syslog", 'S', "Log events to syslog", - (uchar**) &use_syslog, (uchar**) &use_syslog, 0, + &use_syslog, &use_syslog, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "logfile", 'L', "File to log events to", - (uchar**) &logfile, (uchar**) &logfile, 0, + &logfile, &logfile, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { "debug", 'D', "Enable debug mode", - (uchar**) &debug, (uchar**) &debug, 0, + &debug, &debug, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "user", 'u', "Run as user", - (uchar**) &user, (uchar**) &user, 0, + &user, &user, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} }; diff --git a/storage/ndb/src/kernel/blocks/dbtux/Dbtux.hpp b/storage/ndb/src/kernel/blocks/dbtux/Dbtux.hpp index 67ce7a1760a..3917d415575 100644 --- a/storage/ndb/src/kernel/blocks/dbtux/Dbtux.hpp +++ b/storage/ndb/src/kernel/blocks/dbtux/Dbtux.hpp @@ -840,13 +840,13 @@ Dbtux::TreeEnt::cmp(const TreeEnt ent) const */ const unsigned version_wrap_limit = (1 << (ZTUP_VERSION_BITS - 1)); if (m_tupVersion < ent.m_tupVersion) { - if (ent.m_tupVersion - m_tupVersion < version_wrap_limit) + if (unsigned(ent.m_tupVersion - m_tupVersion) < version_wrap_limit) return -1; else return +1; } if (m_tupVersion > ent.m_tupVersion) { - if (m_tupVersion - ent.m_tupVersion < version_wrap_limit) + if (unsigned(m_tupVersion - ent.m_tupVersion) < version_wrap_limit) return +1; else return -1; diff --git a/storage/ndb/src/kernel/vm/Configuration.cpp b/storage/ndb/src/kernel/vm/Configuration.cpp index 72770d35cde..f1e608738e3 100644 --- a/storage/ndb/src/kernel/vm/Configuration.cpp +++ b/storage/ndb/src/kernel/vm/Configuration.cpp @@ -74,35 +74,35 @@ static struct my_option my_long_options[] = { "initial", OPT_INITIAL, "Perform initial start of ndbd, including cleaning the file system. " "Consult documentation before using this", - (uchar**) &_initial, (uchar**) &_initial, 0, + &_initial, &_initial, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "nostart", 'n', "Don't start ndbd immediately. 
Ndbd will await command from ndb_mgmd", - (uchar**) &_no_start, (uchar**) &_no_start, 0, + &_no_start, &_no_start, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "daemon", 'd', "Start ndbd as daemon (default)", - (uchar**) &_daemon, (uchar**) &_daemon, 0, + &_daemon, &_daemon, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0 }, { "nodaemon", OPT_NODAEMON, "Do not start ndbd as daemon, provided for testing purposes", - (uchar**) &_no_daemon, (uchar**) &_no_daemon, 0, + &_no_daemon, &_no_daemon, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "foreground", OPT_FOREGROUND, "Run real ndbd in foreground, provided for debugging purposes" " (implies --nodaemon)", - (uchar**) &_foreground, (uchar**) &_foreground, 0, + &_foreground, &_foreground, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "nowait-nodes", OPT_NOWAIT_NODES, "Nodes that will not be waited for during start", - (uchar**) &_nowait_nodes, (uchar**) &_nowait_nodes, 0, + &_nowait_nodes, &_nowait_nodes, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { "initial-start", OPT_INITIAL_START, "Perform initial start", - (uchar**) &_initialstart, (uchar**) &_initialstart, 0, + &_initialstart, &_initialstart, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "bind-address", OPT_NOWAIT_NODES, "Local bind address", - (uchar**) &_bind_address, (uchar**) &_bind_address, 0, + &_bind_address, &_bind_address, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} }; diff --git a/storage/ndb/src/mgmclient/main.cpp b/storage/ndb/src/mgmclient/main.cpp index 980530953ad..7049bdd12e0 100644 --- a/storage/ndb/src/mgmclient/main.cpp +++ b/storage/ndb/src/mgmclient/main.cpp @@ -73,11 +73,11 @@ static struct my_option my_long_options[] = NDB_STD_OPTS("ndb_mgm"), { "execute", 'e', "execute command and exit", - (uchar**) &opt_execute_str, (uchar**) &opt_execute_str, 0, + &opt_execute_str, &opt_execute_str, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { "try-reconnect", 't', "Specify number of tries for connecting to ndb_mgmd (0 = infinite)", - (uchar**) &_try_reconnect, (uchar**) &_try_reconnect, 0, + &_try_reconnect, &_try_reconnect, 0, GET_UINT, REQUIRED_ARG, 3, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} }; diff --git a/storage/ndb/src/mgmsrv/main.cpp b/storage/ndb/src/mgmsrv/main.cpp index 26198a44a23..e0d9a550cd2 100644 --- a/storage/ndb/src/mgmsrv/main.cpp +++ b/storage/ndb/src/mgmsrv/main.cpp @@ -142,29 +142,29 @@ static struct my_option my_long_options[] = { NDB_STD_OPTS("ndb_mgmd"), { "config-file", 'f', "Specify cluster configuration file", - (uchar**) &opt_config_filename, (uchar**) &opt_config_filename, 0, + &opt_config_filename, &opt_config_filename, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { "print-full-config", 'P', "Print full config and exit", - (uchar**) &g_print_full_config, (uchar**) &g_print_full_config, 0, + &g_print_full_config, &g_print_full_config, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "daemon", 'd', "Run ndb_mgmd in daemon mode (default)", - (uchar**) &opt_daemon, (uchar**) &opt_daemon, 0, + &opt_daemon, &opt_daemon, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0 }, { "interactive", OPT_INTERACTIVE, "Run interactive. 
Not supported but provided for testing purposes", - (uchar**) &opt_interactive, (uchar**) &opt_interactive, 0, + &opt_interactive, &opt_interactive, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "no-nodeid-checks", OPT_NO_NODEID_CHECKS, "Do not provide any node id checks", - (uchar**) &g_no_nodeid_checks, (uchar**) &g_no_nodeid_checks, 0, + &g_no_nodeid_checks, &g_no_nodeid_checks, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "nodaemon", OPT_NO_DAEMON, "Don't run as daemon, but don't read from stdin", - (uchar**) &opt_non_interactive, (uchar**) &opt_non_interactive, 0, + &opt_non_interactive, &opt_non_interactive, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "mycnf", 256, "Read cluster config from my.cnf", - (uchar**) &opt_mycnf, (uchar**) &opt_mycnf, 0, + &opt_mycnf, &opt_mycnf, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} }; diff --git a/storage/ndb/src/ndbapi/TransporterFacade.hpp b/storage/ndb/src/ndbapi/TransporterFacade.hpp index 23fea8792f7..cbda9de6df1 100644 --- a/storage/ndb/src/ndbapi/TransporterFacade.hpp +++ b/storage/ndb/src/ndbapi/TransporterFacade.hpp @@ -366,8 +366,8 @@ bool TransporterFacade::get_node_stopping(NodeId n) const { const ClusterMgr::Node & node = theClusterMgr->getNodeInfo(n); return (!node.m_state.getSingleUserMode() && - (node.m_state.startLevel == NodeState::SL_STOPPING_1) || - (node.m_state.startLevel == NodeState::SL_STOPPING_2)); + ((node.m_state.startLevel == NodeState::SL_STOPPING_1) || + (node.m_state.startLevel == NodeState::SL_STOPPING_2))); } inline diff --git a/storage/ndb/test/ndbapi/testIndexStat.cpp b/storage/ndb/test/ndbapi/testIndexStat.cpp index 559fade3132..3b3e593081b 100644 --- a/storage/ndb/test/ndbapi/testIndexStat.cpp +++ b/storage/ndb/test/ndbapi/testIndexStat.cpp @@ -1297,43 +1297,43 @@ my_long_options[] = { NDB_STD_OPTS("testIndexStat"), { "loglevel", 1001, "Logging level in this program 0-3 (default 0)", - (uchar **)&g_opts.loglevel, (uchar **)&g_opts.loglevel, 0, + &g_opts.loglevel, &g_opts.loglevel, 0, GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { "seed", 1002, "Random seed (0=loop number, default -1=random)", - (uchar **)&g_opts.seed, (uchar **)&g_opts.seed, 0, + &g_opts.seed, &g_opts.seed, 0, GET_INT, REQUIRED_ARG, -1, 0, 0, 0, 0, 0 }, { "loop", 1003, "Number of test loops (default 1, 0=forever)", - (uchar **)&g_opts.loop, (uchar **)&g_opts.loop, 0, + &g_opts.loop, &g_opts.loop, 0, GET_INT, REQUIRED_ARG, 1, 0, 0, 0, 0, 0 }, { "rows", 1004, "Number of rows (default 100000)", - (uchar **)&g_opts.rows, (uchar **)&g_opts.rows, 0, + &g_opts.rows, &g_opts.rows, 0, GET_UINT, REQUIRED_ARG, 100000, 0, 0, 0, 0, 0 }, { "ops", 1005, "Number of index scans per loop (default 1000)", - (uchar **)&g_opts.ops, (uchar **)&g_opts.ops, 0, + &g_opts.ops, &g_opts.ops, 0, GET_UINT, REQUIRED_ARG, 1000, 0, 0, 0, 0, 0 }, { "dupkeys", 1006, "Pct records per key (min 100, default 1000)", - (uchar **)&g_opts.dupkeys, (uchar **)&g_opts.dupkeys, 0, + &g_opts.dupkeys, &g_opts.dupkeys, 0, GET_UINT, REQUIRED_ARG, 1000, 0, 0, 0, 0, 0 }, { "scanpct", 1007, "Preferred max pct of total rows per scan (default 5)", - (uchar **)&g_opts.scanpct, (uchar **)&g_opts.scanpct, 0, + &g_opts.scanpct, &g_opts.scanpct, 0, GET_UINT, REQUIRED_ARG, 5, 0, 0, 0, 0, 0 }, { "nullkeys", 1008, "Pct nulls in each key attribute (default 10)", - (uchar **)&g_opts.nullkeys, (uchar **)&g_opts.nullkeys, 0, + &g_opts.nullkeys, &g_opts.nullkeys, 0, GET_UINT, REQUIRED_ARG, 10, 0, 0, 0, 0, 0 }, { "eqscans", 1009, "Pct scans for partial/full equality 
(default 50)", - (uchar **)&g_opts.eqscans, (uchar **)&g_opts.eqscans, 0, + &g_opts.eqscans, &g_opts.eqscans, 0, GET_UINT, REQUIRED_ARG, 50, 0, 0, 0, 0, 0 }, { "dupscans", 1010, "Pct scans using same bounds (default 10)", - (uchar **)&g_opts.dupscans, (uchar **)&g_opts.dupscans, 0, + &g_opts.dupscans, &g_opts.dupscans, 0, GET_UINT, REQUIRED_ARG, 10, 0, 0, 0, 0, 0 }, { "keeptable", 1011, "Use existing table and data if any and do not drop", - (uchar **)&g_opts.keeptable, (uchar **)&g_opts.keeptable, 0, + &g_opts.keeptable, &g_opts.keeptable, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "no-extra-checks", 1012, "Omit expensive consistency checks", - (uchar **)&g_opts.nochecks, (uchar **)&g_opts.nochecks, 0, + &g_opts.nochecks, &g_opts.nochecks, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "abort-on-error", 1013, "Dump core on any error", - (uchar **)&g_opts.abort, (uchar **)&g_opts.abort, 0, + &g_opts.abort, &g_opts.abort, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, diff --git a/storage/ndb/test/ndbapi/test_event_merge.cpp b/storage/ndb/test/ndbapi/test_event_merge.cpp index d40b985adc2..c4109a23119 100644 --- a/storage/ndb/test/ndbapi/test_event_merge.cpp +++ b/storage/ndb/test/ndbapi/test_event_merge.cpp @@ -2184,57 +2184,57 @@ my_long_options[] = { NDB_STD_OPTS("test_event_merge"), { "abort-on-error", 1001, "Do abort() on any error", - (uchar **)&g_opts.abort_on_error, (uchar **)&g_opts.abort_on_error, 0, + &g_opts.abort_on_error, &g_opts.abort_on_error, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "loglevel", 1002, "Logging level in this program 0-3 (default 0)", - (uchar **)&g_opts.loglevel, (uchar **)&g_opts.loglevel, 0, + &g_opts.loglevel, &g_opts.loglevel, 0, GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { "loop", 1003, "Number of test loops (default 5, 0=forever)", - (uchar **)&g_opts.loop, (uchar **)&g_opts.loop, 0, + &g_opts.loop, &g_opts.loop, 0, GET_INT, REQUIRED_ARG, 5, 0, 0, 0, 0, 0 }, { "maxops", 1004, "Approx number of PK operations per table (default 1000)", - (uchar **)&g_opts.maxops, (uchar **)&g_opts.maxops, 0, + &g_opts.maxops, &g_opts.maxops, 0, GET_UINT, REQUIRED_ARG, 1000, 0, 0, 0, 0, 0 }, { "maxpk", 1005, "Number of different PK values (default 10, max 1000)", - (uchar **)&g_opts.maxpk, (uchar **)&g_opts.maxpk, 0, + &g_opts.maxpk, &g_opts.maxpk, 0, GET_UINT, REQUIRED_ARG, 10, 0, 0, 0, 0, 0 }, { "maxtab", 1006, "Number of tables (default 10, max 100)", - (uchar **)&g_opts.maxtab, (uchar **)&g_opts.maxtab, 0, + &g_opts.maxtab, &g_opts.maxtab, 0, GET_INT, REQUIRED_ARG, 10, 0, 0, 0, 0, 0 }, { "no-blobs", 1007, "Omit blob attributes (5.0: true)", - (uchar **)&g_opts.no_blobs, (uchar **)&g_opts.no_blobs, 0, + &g_opts.no_blobs, &g_opts.no_blobs, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "no-implicit-nulls", 1008, "Insert must include all attrs" " i.e. 
no implicit NULLs", - (uchar **)&g_opts.no_implicit_nulls, (uchar **)&g_opts.no_implicit_nulls, 0, + &g_opts.no_implicit_nulls, &g_opts.no_implicit_nulls, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "no-missing-update", 1009, "Update must include all non-PK attrs", - (uchar **)&g_opts.no_missing_update, (uchar **)&g_opts.no_missing_update, 0, + &g_opts.no_missing_update, &g_opts.no_missing_update, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "no-multiops", 1010, "Allow only 1 operation per commit", - (uchar **)&g_opts.no_multiops, (uchar **)&g_opts.no_multiops, 0, + &g_opts.no_multiops, &g_opts.no_multiops, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "no-nulls", 1011, "Create no NULL values", - (uchar **)&g_opts.no_nulls, (uchar **)&g_opts.no_nulls, 0, + &g_opts.no_nulls, &g_opts.no_nulls, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "one-blob", 1012, "Only one blob attribute (default 2)", - (uchar **)&g_opts.one_blob, (uchar **)&g_opts.one_blob, 0, + &g_opts.one_blob, &g_opts.one_blob, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "opstring", 1013, "Operations to run e.g. idiucdc (c is commit) or" " iuuc:uudc (the : separates loops)", - (uchar **)&g_opts.opstring, (uchar **)&g_opts.opstring, 0, + &g_opts.opstring, &g_opts.opstring, 0, GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { "seed", 1014, "Random seed (0=loop number, default -1=random)", - (uchar **)&g_opts.seed, (uchar **)&g_opts.seed, 0, + &g_opts.seed, &g_opts.seed, 0, GET_INT, REQUIRED_ARG, -1, 0, 0, 0, 0, 0 }, { "separate-events", 1015, "Do not combine events per GCI (5.0: true)", - (uchar **)&g_opts.separate_events, (uchar **)&g_opts.separate_events, 0, + &g_opts.separate_events, &g_opts.separate_events, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "tweak", 1016, "Whatever the source says", - (uchar **)&g_opts.tweak, (uchar **)&g_opts.tweak, 0, + &g_opts.tweak, &g_opts.tweak, 0, GET_UINT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { "use-table", 1017, "Use existing tables", - (uchar **)&g_opts.use_table, (uchar **)&g_opts.use_table, 0, + &g_opts.use_table, &g_opts.use_table, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, diff --git a/storage/ndb/test/ndbapi/test_event_multi_table.cpp b/storage/ndb/test/ndbapi/test_event_multi_table.cpp index 36fb6f511ae..7fbd43ef5eb 100644 --- a/storage/ndb/test/ndbapi/test_event_multi_table.cpp +++ b/storage/ndb/test/ndbapi/test_event_multi_table.cpp @@ -258,7 +258,7 @@ static struct my_option my_long_options[] = { NDB_STD_OPTS(""), { "database", 'd', "Name of database table is in", - (gptr*) &_dbname, (gptr*) &_dbname, 0, + &_dbname, &_dbname, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} }; diff --git a/storage/ndb/test/run-test/main.cpp b/storage/ndb/test/run-test/main.cpp index b5c4385f5d3..397eaf8b77e 100644 --- a/storage/ndb/test/run-test/main.cpp +++ b/storage/ndb/test/run-test/main.cpp @@ -77,60 +77,60 @@ my_bool opt_core; static struct my_option g_options[] = { { "help", '?', "Display this help and exit.", - (uchar **) &g_help, (uchar **) &g_help, + &g_help, &g_help, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "version", 'V', "Output version information and exit.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "clusters", 256, "Cluster", - (uchar **) &g_clusters, (uchar **) &g_clusters, + &g_clusters, &g_clusters, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, { "replicate", 1024, "replicate", - (uchar **) &g_dummy, (uchar **) &g_dummy, + &g_dummy, &g_dummy, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, { "log-file", 256, 
"log-file", - (uchar **) &g_log_filename, (uchar **) &g_log_filename, + &g_log_filename, &g_log_filename, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, { "testcase-file", 'f', "testcase-file", - (uchar **) &g_test_case_filename, (uchar **) &g_test_case_filename, + &g_test_case_filename, &g_test_case_filename, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, { "report-file", 'r', "report-file", - (uchar **) &g_report_filename, (uchar **) &g_report_filename, + &g_report_filename, &g_report_filename, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, { "basedir", 256, "Base path", - (uchar **) &g_basedir, (uchar **) &g_basedir, + &g_basedir, &g_basedir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, { "baseport", 256, "Base port", - (uchar **) &g_baseport, (uchar **) &g_baseport, + &g_baseport, &g_baseport, 0, GET_INT, REQUIRED_ARG, g_baseport, 0, 0, 0, 0, 0}, { "prefix", 256, "mysql install dir", - (uchar **) &g_prefix, (uchar **) &g_prefix, + &g_prefix, &g_prefix, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, { "verbose", 'v', "Verbosity", - (uchar **) &g_verbosity, (uchar **) &g_verbosity, + &g_verbosity, &g_verbosity, 0, GET_INT, REQUIRED_ARG, g_verbosity, 0, 0, 0, 0, 0}, { "configure", 256, "configure", - (uchar **) &g_do_setup, (uchar **) &g_do_setup, + &g_do_setup, &g_do_setup, 0, GET_INT, REQUIRED_ARG, g_do_setup, 0, 0, 0, 0, 0 }, { "deploy", 256, "deploy", - (uchar **) &g_do_deploy, (uchar **) &g_do_deploy, + &g_do_deploy, &g_do_deploy, 0, GET_INT, REQUIRED_ARG, g_do_deploy, 0, 0, 0, 0, 0 }, { "sshx", 256, "sshx", - (uchar **) &g_do_sshx, (uchar **) &g_do_sshx, + &g_do_sshx, &g_do_sshx, 0, GET_INT, REQUIRED_ARG, g_do_sshx, 0, 0, 0, 0, 0 }, { "start", 256, "start", - (uchar **) &g_do_start, (uchar **) &g_do_start, + &g_do_start, &g_do_start, 0, GET_INT, REQUIRED_ARG, g_do_start, 0, 0, 0, 0, 0 }, { "fqpn", 256, "Fully qualified path-names ", - (uchar **) &g_fqpn, (uchar **) &g_fqpn, + &g_fqpn, &g_fqpn, 0, GET_INT, REQUIRED_ARG, g_fqpn, 0, 0, 0, 0, 0 }, { "default-ports", 256, "Use default ports when possible", - (uchar **) &g_default_ports, (uchar **) &g_default_ports, + &g_default_ports, &g_default_ports, 0, GET_INT, REQUIRED_ARG, g_default_ports, 0, 0, 0, 0, 0 }, { "mode", 256, "Mode 0=interactive 1=regression 2=bench", - (uchar **) &g_mode, (uchar **) &g_mode, + &g_mode, &g_mode, 0, GET_INT, REQUIRED_ARG, g_mode, 0, 0, 0, 0, 0 }, { "quit", 256, "Quit before starting tests", - (uchar **) &g_mode, (uchar **) &g_do_quit, + &g_mode, &g_do_quit, 0, GET_BOOL, NO_ARG, g_do_quit, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} }; diff --git a/storage/ndb/test/src/NDBT_Test.cpp b/storage/ndb/test/src/NDBT_Test.cpp index 69f3723ca75..b7b830af23d 100644 --- a/storage/ndb/test/src/NDBT_Test.cpp +++ b/storage/ndb/test/src/NDBT_Test.cpp @@ -1195,35 +1195,35 @@ static struct my_option my_long_options[] = { NDB_STD_OPTS(""), { "print", OPT_PRINT, "Print execution tree", - (uchar **) &opt_print, (uchar **) &opt_print, 0, + &opt_print, &opt_print, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "print_html", OPT_PRINT_HTML, "Print execution tree in html table format", - (uchar **) &opt_print_html, (uchar **) &opt_print_html, 0, + &opt_print_html, &opt_print_html, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "print_cases", OPT_PRINT_CASES, "Print list of test cases", - (uchar **) &opt_print_cases, (uchar **) &opt_print_cases, 0, + &opt_print_cases, &opt_print_cases, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "records", 'r', "Number of records", - (uchar **) &opt_records, (uchar **) &opt_records, 
0, + &opt_records, &opt_records, 0, GET_INT, REQUIRED_ARG, 1000, 0, 0, 0, 0, 0 }, { "loops", 'l', "Number of loops", - (uchar **) &opt_loops, (uchar **) &opt_loops, 0, + &opt_loops, &opt_loops, 0, GET_INT, REQUIRED_ARG, 5, 0, 0, 0, 0, 0 }, { "seed", 1024, "Random seed", - (uchar **) &opt_seed, (uchar **) &opt_seed, 0, + &opt_seed, &opt_seed, 0, GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { "testname", 'n', "Name of test to run", - (uchar **) &opt_testname, (uchar **) &opt_testname, 0, + &opt_testname, &opt_testname, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { "remote_mgm", 'm', "host:port to mgmsrv of remote cluster", - (uchar **) &opt_remote_mgm, (uchar **) &opt_remote_mgm, 0, + &opt_remote_mgm, &opt_remote_mgm, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { "timer", 't', "Print execution time", - (uchar **) &opt_timer, (uchar **) &opt_timer, 0, + &opt_timer, &opt_timer, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "verbose", 'v', "Print verbose status", - (uchar **) &opt_verbose, (uchar **) &opt_verbose, 0, + &opt_verbose, &opt_verbose, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} }; diff --git a/storage/ndb/test/tools/connect.cpp b/storage/ndb/test/tools/connect.cpp index 278dbe833ea..d12d1b7a608 100644 --- a/storage/ndb/test/tools/connect.cpp +++ b/storage/ndb/test/tools/connect.cpp @@ -31,14 +31,14 @@ static struct my_option my_long_options[] = { NDB_STD_OPTS("ndb_desc"), { "loop", 'l', "loops", - (gptr*) &_loop, (gptr*) &_loop, 0, + &_loop, &_loop, 0, GET_INT, REQUIRED_ARG, _loop, 0, 0, 0, 0, 0 }, { "sleep", 's', "Sleep (ms) between connection attempt", - (gptr*) &_sleep, (gptr*) &_sleep, 0, + &_sleep, &_sleep, 0, GET_INT, REQUIRED_ARG, _sleep, 0, 0, 0, 0, 0 }, { "drop", 'd', "Drop event operations before disconnect (0 = no, 1 = yes, else rand", - (gptr*) &_drop, (gptr*) &_drop, 0, + &_drop, &_drop, 0, GET_INT, REQUIRED_ARG, _drop, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} }; diff --git a/storage/ndb/tools/delete_all.cpp b/storage/ndb/tools/delete_all.cpp index 1bf89f5a32f..23d1ef387d2 100644 --- a/storage/ndb/tools/delete_all.cpp +++ b/storage/ndb/tools/delete_all.cpp @@ -36,16 +36,16 @@ static struct my_option my_long_options[] = { NDB_STD_OPTS("ndb_desc"), { "database", 'd', "Name of database table is in", - (uchar**) &_dbname, (uchar**) &_dbname, 0, + &_dbname, &_dbname, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { "transactional", 't', "Single transaction (may run out of operations)", - (uchar**) &_transactional, (uchar**) &_transactional, 0, + &_transactional, &_transactional, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "tupscan", 999, "Run tupscan", - (uchar**) &_tupscan, (uchar**) &_tupscan, 0, + &_tupscan, &_tupscan, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "diskscan", 999, "Run diskcan", - (uchar**) &_diskscan, (uchar**) &_diskscan, 0, + &_diskscan, &_diskscan, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} }; diff --git a/storage/ndb/tools/desc.cpp b/storage/ndb/tools/desc.cpp index 831005139de..f31b4f6ae1b 100644 --- a/storage/ndb/tools/desc.cpp +++ b/storage/ndb/tools/desc.cpp @@ -39,16 +39,16 @@ static struct my_option my_long_options[] = { NDB_STD_OPTS("ndb_desc"), { "database", 'd', "Name of database table is in", - (uchar**) &_dbname, (uchar**) &_dbname, 0, + &_dbname, &_dbname, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { "unqualified", 'u', "Use unqualified table names", - (uchar**) &_unqualified, (uchar**) 
&_unqualified, 0, + &_unqualified, &_unqualified, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "extra-partition-info", 'p', "Print more info per partition", - (uchar**) &_partinfo, (uchar**) &_partinfo, 0, + &_partinfo, &_partinfo, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "retries", 'r', "Retry every second for # retries", - (uchar**) &_retries, (uchar**) &_retries, 0, + &_retries, &_retries, 0, GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} }; diff --git a/storage/ndb/tools/drop_index.cpp b/storage/ndb/tools/drop_index.cpp index ec88f331a80..82dd595f7df 100644 --- a/storage/ndb/tools/drop_index.cpp +++ b/storage/ndb/tools/drop_index.cpp @@ -30,7 +30,7 @@ static struct my_option my_long_options[] = { NDB_STD_OPTS("ndb_desc"), { "database", 'd', "Name of database table is in", - (uchar**) &_dbname, (uchar**) &_dbname, 0, + &_dbname, &_dbname, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} }; diff --git a/storage/ndb/tools/drop_tab.cpp b/storage/ndb/tools/drop_tab.cpp index 8d07afbbf50..1fba31b5c8a 100644 --- a/storage/ndb/tools/drop_tab.cpp +++ b/storage/ndb/tools/drop_tab.cpp @@ -30,7 +30,7 @@ static struct my_option my_long_options[] = { NDB_STD_OPTS("ndb_desc"), { "database", 'd', "Name of database table is in", - (uchar**) &_dbname, (uchar**) &_dbname, 0, + &_dbname, &_dbname, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} }; diff --git a/storage/ndb/tools/listTables.cpp b/storage/ndb/tools/listTables.cpp index 45129cb34af..bd70587f77e 100644 --- a/storage/ndb/tools/listTables.cpp +++ b/storage/ndb/tools/listTables.cpp @@ -256,22 +256,22 @@ static struct my_option my_long_options[] = { NDB_STD_OPTS("ndb_show_tables"), { "database", 'd', "Name of database table is in", - (uchar**) &_dbname, (uchar**) &_dbname, 0, + &_dbname, &_dbname, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { "loops", 'l', "loops", - (uchar**) &_loops, (uchar**) &_loops, 0, + &_loops, &_loops, 0, GET_INT, REQUIRED_ARG, 1, 0, 0, 0, 0, 0 }, { "type", 't', "type", - (uchar**) &_type, (uchar**) &_type, 0, + &_type, &_type, 0, GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { "unqualified", 'u', "Use unqualified table names", - (uchar**) &_unqualified, (uchar**) &_unqualified, 0, + &_unqualified, &_unqualified, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "parsable", 'p', "Return output suitable for mysql LOAD DATA INFILE", - (uchar**) &_parsable, (uchar**) &_parsable, 0, + &_parsable, &_parsable, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "show-temp-status", OPT_SHOW_TMP_STATUS, "Show table temporary flag", - (uchar**) &show_temp_status, (uchar**) &show_temp_status, 0, + &show_temp_status, &show_temp_status, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} }; diff --git a/storage/ndb/tools/ndb_config.cpp b/storage/ndb/tools/ndb_config.cpp index af36103f947..0df88dc0167 100644 --- a/storage/ndb/tools/ndb_config.cpp +++ b/storage/ndb/tools/ndb_config.cpp @@ -58,37 +58,37 @@ static struct my_option my_long_options[] = { NDB_STD_OPTS("ndb_config"), { "nodes", 256, "Print nodes", - (uchar**) &g_nodes, (uchar**) &g_nodes, + &g_nodes, &g_nodes, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, { "connections", 256, "Print connections", - (uchar**) &g_connections, (uchar**) &g_connections, + &g_connections, &g_connections, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, { "query", 'q', "Query option(s)", - (uchar**) &g_query, (uchar**) 
&g_query, + &g_query, &g_query, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, { "host", 256, "Host", - (uchar**) &g_host, (uchar**) &g_host, + &g_host, &g_host, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, { "type", 258, "Type of node/connection", - (uchar**) &g_type, (uchar**) &g_type, + &g_type, &g_type, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, { "id", 258, "Nodeid", - (uchar**) &g_nodeid, (uchar**) &g_nodeid, + &g_nodeid, &g_nodeid, 0, GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, { "nodeid", 258, "Nodeid", - (uchar**) &g_nodeid, (uchar**) &g_nodeid, + &g_nodeid, &g_nodeid, 0, GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, { "fields", 'f', "Field separator", - (uchar**) &g_field_delimiter, (uchar**) &g_field_delimiter, + &g_field_delimiter, &g_field_delimiter, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, { "rows", 'r', "Row separator", - (uchar**) &g_row_delimiter, (uchar**) &g_row_delimiter, + &g_row_delimiter, &g_row_delimiter, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, { "config-file", 256, "Path to config.ini", - (uchar**) &g_config_file, (uchar**) &g_config_file, + &g_config_file, &g_config_file, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, { "mycnf", 256, "Read config from my.cnf", - (uchar**) &g_mycnf, (uchar**) &g_mycnf, + &g_mycnf, &g_mycnf, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} }; diff --git a/storage/ndb/tools/restore/restore_main.cpp b/storage/ndb/tools/restore/restore_main.cpp index 7db77524ad8..966c539cee9 100644 --- a/storage/ndb/tools/restore/restore_main.cpp +++ b/storage/ndb/tools/restore/restore_main.cpp @@ -100,99 +100,99 @@ static struct my_option my_long_options[] = { NDB_STD_OPTS("ndb_restore"), { "connect", 'c', "same as --connect-string", - (uchar**) &opt_connect_str, (uchar**) &opt_connect_str, 0, + &opt_connect_str, &opt_connect_str, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { "nodeid", 'n', "Backup files from node with id", - (uchar**) &ga_nodeId, (uchar**) &ga_nodeId, 0, + &ga_nodeId, &ga_nodeId, 0, GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { "backupid", 'b', "Backup id", - (uchar**) &ga_backupId, (uchar**) &ga_backupId, 0, + &ga_backupId, &ga_backupId, 0, GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { "restore_data", 'r', "Restore table data/logs into NDB Cluster using NDBAPI", - (uchar**) &_restore_data, (uchar**) &_restore_data, 0, + &_restore_data, &_restore_data, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "restore_meta", 'm', "Restore meta data into NDB Cluster using NDBAPI", - (uchar**) &_restore_meta, (uchar**) &_restore_meta, 0, + &_restore_meta, &_restore_meta, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "no-upgrade", 'u', "Don't upgrade array type for var attributes, which don't resize VAR data and don't change column attributes", - (uchar**) &ga_no_upgrade, (uchar**) &ga_no_upgrade, 0, + &ga_no_upgrade, &ga_no_upgrade, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "no-restore-disk-objects", 'd', "Dont restore disk objects (tablespace/logfilegroups etc)", - (uchar**) &_no_restore_disk, (uchar**) &_no_restore_disk, 0, + &_no_restore_disk, &_no_restore_disk, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "restore_epoch", 'e', "Restore epoch info into the status table. Convenient on a MySQL Cluster " "replication slave, for starting replication. The row in " NDB_REP_DB "." 
NDB_APPLY_TABLE " with id 0 will be updated/inserted.", - (uchar**) &ga_restore_epoch, (uchar**) &ga_restore_epoch, 0, + &ga_restore_epoch, &ga_restore_epoch, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "skip-table-check", 's', "Skip table structure check during restore of data", - (uchar**) &ga_skip_table_check, (uchar**) &ga_skip_table_check, 0, + &ga_skip_table_check, &ga_skip_table_check, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "parallelism", 'p', "No of parallel transactions during restore of data." "(parallelism can be 1 to 1024)", - (uchar**) &ga_nParallelism, (uchar**) &ga_nParallelism, 0, + &ga_nParallelism, &ga_nParallelism, 0, GET_INT, REQUIRED_ARG, 128, 1, 1024, 0, 1, 0 }, { "print", OPT_PRINT, "Print metadata, data and log to stdout", - (uchar**) &_print, (uchar**) &_print, 0, + &_print, &_print, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "print_data", OPT_PRINT_DATA, "Print data to stdout", - (uchar**) &_print_data, (uchar**) &_print_data, 0, + &_print_data, &_print_data, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "print_meta", OPT_PRINT_META, "Print meta data to stdout", - (uchar**) &_print_meta, (uchar**) &_print_meta, 0, + &_print_meta, &_print_meta, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "print_log", OPT_PRINT_LOG, "Print log to stdout", - (uchar**) &_print_log, (uchar**) &_print_log, 0, + &_print_log, &_print_log, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "backup_path", OPT_BACKUP_PATH, "Path to backup files", - (uchar**) &ga_backupPath, (uchar**) &ga_backupPath, 0, + &ga_backupPath, &ga_backupPath, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { "dont_ignore_systab_0", 'f', "Experimental. Do not ignore system table during restore.", - (uchar**) &ga_dont_ignore_systab_0, (uchar**) &ga_dont_ignore_systab_0, 0, + &ga_dont_ignore_systab_0, &ga_dont_ignore_systab_0, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "ndb-nodegroup-map", OPT_NDB_NODEGROUP_MAP, "Nodegroup map for ndbcluster. Syntax: list of (source_ng, dest_ng)", - (uchar**) &opt_nodegroup_map_str, - (uchar**) &opt_nodegroup_map_str, + &opt_nodegroup_map_str, + &opt_nodegroup_map_str, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { "fields-enclosed-by", OPT_FIELDS_ENCLOSED_BY, "Fields are enclosed by ...", - (uchar**) &opt_fields_enclosed_by, (uchar**) &opt_fields_enclosed_by, 0, + &opt_fields_enclosed_by, &opt_fields_enclosed_by, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { "fields-terminated-by", OPT_FIELDS_TERMINATED_BY, "Fields are terminated by ...", - (uchar**) &opt_fields_terminated_by, - (uchar**) &opt_fields_terminated_by, 0, + &opt_fields_terminated_by, + &opt_fields_terminated_by, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { "fields-optionally-enclosed-by", OPT_FIELDS_OPTIONALLY_ENCLOSED_BY, "Fields are optionally enclosed by ...", - (uchar**) &opt_fields_optionally_enclosed_by, - (uchar**) &opt_fields_optionally_enclosed_by, 0, + &opt_fields_optionally_enclosed_by, + &opt_fields_optionally_enclosed_by, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { "hex", OPT_HEX_FORMAT, "print binary types in hex format", - (uchar**) &opt_hex_format, (uchar**) &opt_hex_format, 0, + &opt_hex_format, &opt_hex_format, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "tab", 'T', "Creates tab separated textfile for each table to " "given path. 
(creates .txt files)", - (uchar**) &tab_path, (uchar**) &tab_path, 0, + &tab_path, &tab_path, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, { "append", OPT_APPEND, "for --tab append data to file", - (uchar**) &opt_append, (uchar**) &opt_append, 0, + &opt_append, &opt_append, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "lines-terminated-by", OPT_LINES_TERMINATED_BY, "", - (uchar**) &opt_lines_terminated_by, (uchar**) &opt_lines_terminated_by, 0, + &opt_lines_terminated_by, &opt_lines_terminated_by, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { "verbose", OPT_VERBOSE, "verbosity", - (uchar**) &opt_verbose, (uchar**) &opt_verbose, 0, + &opt_verbose, &opt_verbose, 0, GET_INT, REQUIRED_ARG, 1, 0, 255, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} }; diff --git a/storage/ndb/tools/select_all.cpp b/storage/ndb/tools/select_all.cpp index 23d5f95f3f7..95dfeab9eed 100644 --- a/storage/ndb/tools/select_all.cpp +++ b/storage/ndb/tools/select_all.cpp @@ -54,43 +54,43 @@ static struct my_option my_long_options[] = { NDB_STD_OPTS("ndb_desc"), { "database", 'd', "Name of database table is in", - (uchar**) &_dbname, (uchar**) &_dbname, 0, + &_dbname, &_dbname, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { "parallelism", 'p', "parallelism", - (uchar**) &_parallelism, (uchar**) &_parallelism, 0, + &_parallelism, &_parallelism, 0, GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { "lock", 'l', "Read(0), Read-hold(1), Exclusive(2)", - (uchar**) &_lock, (uchar**) &_lock, 0, + &_lock, &_lock, 0, GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { "order", 'o', "Sort resultset according to index", - (uchar**) &_order, (uchar**) &_order, 0, + &_order, &_order, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "descending", 'z', "Sort descending (requires order flag)", - (uchar**) &_descending, (uchar**) &_descending, 0, + &_descending, &_descending, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "header", 'h', "Print header", - (uchar**) &_header, (uchar**) &_header, 0, + &_header, &_header, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0 }, { "useHexFormat", 'x', "Output numbers in hexadecimal format", - (uchar**) &_useHexFormat, (uchar**) &_useHexFormat, 0, + &_useHexFormat, &_useHexFormat, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "delimiter", 'D', "Column delimiter", - (uchar**) &_delimiter, (uchar**) &_delimiter, 0, + &_delimiter, &_delimiter, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { "disk", 256, "Dump disk ref", - (uchar**) &_dumpDisk, (uchar**) &_dumpDisk, 0, + &_dumpDisk, &_dumpDisk, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "rowid", 256, "Dump rowid", - (uchar**) &use_rowid, (uchar**) &use_rowid, 0, + &use_rowid, &use_rowid, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "gci", 256, "Dump gci", - (uchar**) &use_gci, (uchar**) &use_gci, 0, + &use_gci, &use_gci, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "tupscan", 't', "Scan in tup order", - (uchar**) &_tup, (uchar**) &_tup, 0, + &_tup, &_tup, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "nodata", 256, "Dont print data", - (uchar**) &nodata, (uchar**) &nodata, 0, + &nodata, &nodata, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} }; diff --git a/storage/ndb/tools/select_count.cpp b/storage/ndb/tools/select_count.cpp index 73982e886b5..6bdc682c16a 100644 --- a/storage/ndb/tools/select_count.cpp +++ b/storage/ndb/tools/select_count.cpp @@ -43,13 +43,13 @@ static struct my_option my_long_options[] = { NDB_STD_OPTS("ndb_desc"), { "database", 'd', "Name of database table is in", - (uchar**) &_dbname, (uchar**) 
&_dbname, 0, + &_dbname, &_dbname, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { "parallelism", 'p', "parallelism", - (uchar**) &_parallelism, (uchar**) &_parallelism, 0, + &_parallelism, &_parallelism, 0, GET_INT, REQUIRED_ARG, 240, 0, 0, 0, 0, 0 }, { "lock", 'l', "Read(0), Read-hold(1), Exclusive(2)", - (uchar**) &_lock, (uchar**) &_lock, 0, + &_lock, &_lock, 0, GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} }; diff --git a/storage/ndb/tools/waiter.cpp b/storage/ndb/tools/waiter.cpp index fc2a4b368b1..26c86e6d196 100644 --- a/storage/ndb/tools/waiter.cpp +++ b/storage/ndb/tools/waiter.cpp @@ -44,17 +44,17 @@ static struct my_option my_long_options[] = { NDB_STD_OPTS("ndb_desc"), { "no-contact", 'n', "Wait for cluster no contact", - (uchar**) &_no_contact, (uchar**) &_no_contact, 0, + &_no_contact, &_no_contact, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "not-started", OPT_WAIT_STATUS_NOT_STARTED, "Wait for cluster not started", - (uchar**) &_not_started, (uchar**) &_not_started, 0, + &_not_started, &_not_started, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "single-user", OPT_WAIT_STATUS_SINGLE_USER, "Wait for cluster to enter single user mode", - (uchar**) &_single_user, (uchar**) &_single_user, 0, + &_single_user, &_single_user, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, { "timeout", 't', "Timeout to wait in seconds", - (uchar**) &_timeout, (uchar**) &_timeout, 0, + &_timeout, &_timeout, 0, GET_INT, REQUIRED_ARG, 120, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} }; diff --git a/storage/perfschema/pfs_events_waits.cc b/storage/perfschema/pfs_events_waits.cc index aae8f9dc8c1..e32a77512cc 100644 --- a/storage/perfschema/pfs_events_waits.cc +++ b/storage/perfschema/pfs_events_waits.cc @@ -39,7 +39,6 @@ bool flag_events_waits_summary_by_thread_by_event_name= true; bool flag_events_waits_summary_by_event_name= true; /** Consumer flag for table EVENTS_WAITS_SUMMARY_BY_INSTANCE. */ bool flag_events_waits_summary_by_instance= true; -bool flag_events_locks_summary_by_thread_by_event_name= true; bool flag_events_locks_summary_by_event_name= true; bool flag_events_locks_summary_by_instance= true; /** Consumer flag for table FILE_SUMMARY_BY_EVENT_NAME. */ @@ -96,9 +95,8 @@ static void copy_events_waits(PFS_events_waits *dest, /* Signal readers they are about to read garbage ... */ dest->m_wait_class= NO_WAIT_CLASS; /* ... that this can generate. */ - memcpy_fixed(dest_body, - source_body, - sizeof(PFS_events_waits) - sizeof(events_waits_class)); + memcpy(dest_body, source_body, + sizeof(PFS_events_waits) - sizeof(events_waits_class)); /* Signal readers the record is now clean again. */ dest->m_wait_class= source->m_wait_class; } |
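Most of the option-table hunks above are one mechanical change: the NDB tools stop casting option-variable addresses to (uchar**) or (gptr*) when filling in the value and u_max_value slots of struct my_option, which suggests those members now take a generic pointer. The struct definition itself is not part of this diff, so the sketch below uses a stand-in type rather than the real my_getopt declaration; it only illustrates why the casts become unnecessary.

/* sketch_my_option.cc -- illustration only; option_sketch is a stand-in
   for the value/u_max_value part of struct my_option, not MySQL code. */
#include <cstdio>

struct option_sketch
{
  const char *name;
  const char *comment;
  void       *value;        /* generic pointer: no (uchar**) cast needed */
  void       *u_max_value;
};

static bool opt_mycnf = false;   /* option variables keep their own types */
static int  opt_loops = 5;

static option_sketch sketch_options[] =
{
  /* before the cleanup: (uchar**) &opt_mycnf, (uchar**) &opt_mycnf, ... */
  { "mycnf", "Read cluster config from my.cnf", &opt_mycnf, &opt_mycnf },
  { "loops", "Number of loops",                 &opt_loops, &opt_loops },
  { 0, 0, 0, 0 }
};

int main()
{
  /* the real my_getopt writes through 'value' according to the GET_* tag;
     here we only show that the addresses convert implicitly to void*. */
  for (const option_sketch *o = sketch_options; o->name; o++)
    std::printf("%-6s stored at %p\n", o->name, o->value);
  return 0;
}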
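The TransporterFacade::get_node_stopping() hunk is a behaviour fix rather than cleanup: '&&' binds tighter than '||', so the old return expression grouped as (!single_user && stopping_1) || stopping_2 and evaluated to true for a node in SL_STOPPING_2 even in single user mode. A small self-contained check of the two groupings (not the NDB code itself, just the boolean shape of the expression):

/* precedence_sketch.cc -- illustration of the operator grouping only. */
#include <cassert>

static bool old_grouping(bool single_user, bool stopping_1, bool stopping_2)
{
  /* the grouping the pre-fix expression actually had: '&&' binds tighter */
  return (!single_user && stopping_1) || stopping_2;
}

static bool new_grouping(bool single_user, bool stopping_1, bool stopping_2)
{
  /* the grouping after the added parentheses: '||' is evaluated first */
  return !single_user && (stopping_1 || stopping_2);
}

int main()
{
  /* node in SL_STOPPING_2 while the cluster is in single user mode */
  assert(old_grouping(true, false, true) == true);   /* reported stopping  */
  assert(new_grouping(true, false, true) == false);  /* correctly ignored  */

  /* with stopping_2 == false the two forms agree everywhere */
  for (int s = 0; s < 2; s++)
    for (int a = 0; a < 2; a++)
      assert(old_grouping(s != 0, a != 0, false) == new_grouping(s != 0, a != 0, false));
  return 0;
}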
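The final hunk (pfs_events_waits.cc) keeps the same copy protocol while switching from memcpy_fixed to memcpy: the wait class doubles as a consistency marker around a non-atomic copy of the rest of the record, so lock-free readers can recognize a half-written row. Roughly, with simplified names and sizes (the real code copies sizeof(PFS_events_waits) - sizeof(events_waits_class) bytes starting just past m_wait_class; this is a sketch, not the Performance Schema code):

/* copy_protocol_sketch.cc -- simplified shape of copy_events_waits(). */
#include <cstring>

enum wait_class { NO_WAIT_CLASS = 0, WAIT_CLASS_MUTEX = 1 };

struct waits_record
{
  volatile wait_class m_wait_class;  /* readers treat NO_WAIT_CLASS as torn */
  char                m_body[64];    /* everything after the class marker   */
};

static void copy_record(waits_record *dest, const waits_record *source)
{
  /* Signal readers they are about to read garbage ... */
  dest->m_wait_class = NO_WAIT_CLASS;
  /* ... that this plain (non-atomic) copy can generate. */
  std::memcpy(dest->m_body, source->m_body, sizeof(dest->m_body));
  /* Signal readers the record is now clean again. */
  dest->m_wait_class = source->m_wait_class;
}

int main()
{
  waits_record a = { WAIT_CLASS_MUTEX, { 'x' } };
  waits_record b = { NO_WAIT_CLASS, { 0 } };
  copy_record(&b, &a);
  return b.m_wait_class == WAIT_CLASS_MUTEX ? 0 : 1;
}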