Diffstat (limited to 'myisam/mi_check.c')
-rw-r--r-- | myisam/mi_check.c | 263 |
1 file changed, 188 insertions(+), 75 deletions(-)
diff --git a/myisam/mi_check.c b/myisam/mi_check.c
index ed0a84e737d..420bc974752 100644
--- a/myisam/mi_check.c
+++ b/myisam/mi_check.c
@@ -940,7 +940,7 @@ int chk_data_link(MI_CHECK *param, MI_INFO *info,int extend)
   ha_rows records,del_blocks;
   my_off_t used,empty,pos,splits,start_recpos,
            del_length,link_used,start_block;
-  byte *record,*to;
+  byte *record= 0, *to;
   char llbuff[22],llbuff2[22],llbuff3[22];
   ha_checksum intern_record_checksum;
   ha_checksum key_checksum[MI_MAX_POSSIBLE_KEY];
@@ -957,7 +957,7 @@ int chk_data_link(MI_CHECK *param, MI_INFO *info,int extend)
       puts("- check record links");
   }
-  if (!(record= (byte*) my_malloc(info->s->base.pack_reclength,MYF(0))))
+  if (!mi_alloc_rec_buff(info, -1, &record))
   {
     mi_check_print_error(param,"Not enough memory for record");
     DBUG_RETURN(-1);
   }
@@ -1364,17 +1364,150 @@ int chk_data_link(MI_CHECK *param, MI_INFO *info,int extend)
       printf("Lost space: %12s Linkdata: %10s\n",
              llstr(empty,llbuff),llstr(link_used,llbuff2));
   }
-  my_free((gptr) record,MYF(0));
+  my_free(mi_get_rec_buff_ptr(info, record), MYF(0));
   DBUG_RETURN (error);
 err:
   mi_check_print_error(param,"got error: %d when reading datafile at record: %s",my_errno, llstr(records,llbuff));
 err2:
-  my_free((gptr) record,MYF(0));
+  my_free(mi_get_rec_buff_ptr(info, record), MYF(0));
   param->testflag|=T_RETRY_WITHOUT_QUICK;
   DBUG_RETURN(1);
 } /* chk_data_link */
 
 
 
+/**
+  @brief Drop all indexes
+
+  @param[in]  param    check parameters
+  @param[in]  info     MI_INFO handle
+  @param[in]  force    whether to force dropping all indexes
+
+  @return  status
+    @retval  0      OK
+    @retval  != 0   Error
+
+  @note
+    Once allocated, index blocks remain part of the key file forever.
+    When indexes are disabled, no block is freed. When enabling indexes,
+    no block is freed either. The new indexes are created from new
+    blocks. (Bug #4692)
+
+    Before recreating formerly disabled indexes, the unused blocks
+    must be freed. There are two options to do this:
+    - Follow the tree of disabled indexes, add all blocks to the
+      deleted blocks chain. Would require a lot of random I/O.
+    - Drop all blocks by clearing all index root pointers and all
+      delete chain pointers and resetting key_file_length to the end
+      of the index file header. This requires recreating all indexes,
+      even those that may still be intact.
+    The second method is probably faster in most cases.
+
+    When disabling indexes, MySQL disables either all indexes or all
+    non-unique indexes. When MySQL [re-]enables disabled indexes
+    (T_CREATE_MISSING_KEYS), then we either have "lost" blocks in the
+    index file, or there are no non-unique indexes. In the latter case,
+    mi_repair*() would not be called as there would be no disabled
+    indexes.
+
+    If there were more unique indexes than disabled (non-unique)
+    indexes, we could use the first method. But this is not implemented
+    yet. For now we drop and recreate all indexes when repair is called.
+
+    However, there is an exception. Sometimes MySQL disables non-unique
+    indexes when the table is empty (e.g. when copying a table in
+    mysql_alter_table()). When enabling the non-unique indexes, they
+    are still empty. So there is no index block that can be lost. This
+    optimization is implemented in this function.
+
+    Note that in normal repair (T_CREATE_MISSING_KEYS not set) we
+    recreate all enabled indexes unconditionally. We do not change the
+    key_map. Otherwise we invert the key map temporarily (outside of
+    this function) and recreate the then "seemingly" enabled indexes.
+
+    When we cannot use the optimization and drop all indexes, we
+    pretend that all indexes were disabled. By the inversion, we will
+    then recreate all indexes.
+*/
+
+static int mi_drop_all_indexes(MI_CHECK *param, MI_INFO *info, my_bool force)
+{
+  MYISAM_SHARE *share= info->s;
+  MI_STATE_INFO *state= &share->state;
+  uint i;
+  int error;
+  DBUG_ENTER("mi_drop_all_indexes");
+
+  /*
+    If any of the disabled indexes has a key block assigned, we must
+    drop and recreate all indexes to avoid losing index blocks.
+
+    If we want to recreate disabled indexes only _and_ all of these
+    indexes are empty, we don't need to recreate the existing indexes.
+  */
+  if (!force && (param->testflag & T_CREATE_MISSING_KEYS))
+  {
+    DBUG_PRINT("repair", ("creating missing indexes"));
+    for (i= 0; i < share->base.keys; i++)
+    {
+      DBUG_PRINT("repair", ("index #: %u key_root: 0x%lx active: %d",
+                            i, (long) state->key_root[i],
+                            mi_is_key_active(state->key_map, i)));
+      if ((state->key_root[i] != HA_OFFSET_ERROR) &&
+          !mi_is_key_active(state->key_map, i))
+      {
+        /*
+          This index has at least one key block and it is disabled.
+          We would lose its block(s) if we just recreated it.
+          So we need to drop and recreate all indexes.
+        */
+        DBUG_PRINT("repair", ("nonempty and disabled: recreate all"));
+        break;
+      }
+    }
+    if (i >= share->base.keys)
+    {
+      /*
+        All of the disabled indexes are empty. We can just recreate them.
+        Flush dirty blocks of this index file from key cache and remove
+        all blocks of this index file from key cache.
+      */
+      DBUG_PRINT("repair", ("all disabled are empty: create missing"));
+      error= flush_key_blocks(share->key_cache, share->kfile,
+                              FLUSH_FORCE_WRITE);
+      goto end;
+    }
+    /*
+      We now drop all indexes and declare them disabled. With the
+      T_CREATE_MISSING_KEYS flag, mi_repair*() will recreate all
+      disabled indexes and enable them.
+    */
+    mi_clear_all_keys_active(state->key_map);
+    DBUG_PRINT("repair", ("declared all indexes disabled"));
+  }
+
+  /* Remove all key blocks of this index file from key cache. */
+  if ((error= flush_key_blocks(share->key_cache, share->kfile,
+                               FLUSH_IGNORE_CHANGED)))
+    goto end; /* purecov: inspected */
+
+  /* Clear index root block pointers. */
+  for (i= 0; i < share->base.keys; i++)
+    state->key_root[i]= HA_OFFSET_ERROR;
+
+  /* Clear the delete chains. */
+  for (i= 0; i < state->header.max_block_size; i++)
+    state->key_del[i]= HA_OFFSET_ERROR;
+
+  /* Reset index file length to end of index file header. */
+  info->state->key_file_length= share->base.keystart;
+
+  DBUG_PRINT("repair", ("dropped all indexes"));
+  /* error= 0; set by last (error= flush_key_blocks()). */
+
+ end:
+  DBUG_RETURN(error);
+}
+
+
 	/* Recover old table by reading each record and writing all keys */
 	/* Save new datafile-name in temp_filename */
@@ -1382,7 +1515,6 @@ int mi_repair(MI_CHECK *param, register MI_INFO *info,
 	      my_string name, int rep_quick)
 {
   int error,got_error;
-  uint i;
   ha_rows start_records,new_header_length;
   my_off_t del;
   File new_file;
@@ -1428,8 +1560,7 @@ int mi_repair(MI_CHECK *param, register MI_INFO *info,
 			MYF(MY_WME | MY_WAIT_IF_FULL)))
     goto err;
   info->opt_flag|=WRITE_CACHE_USED;
-  if (!(sort_param.record=(byte*) my_malloc((uint) share->base.pack_reclength,
-                                            MYF(0))) ||
+  if (!mi_alloc_rec_buff(info, -1, &sort_param.record) ||
       !mi_alloc_rec_buff(info, -1, &sort_param.rec_buff))
   {
     mi_check_print_error(param, "Not enough memory for extra record");
@@ -1486,25 +1617,10 @@ int mi_repair(MI_CHECK *param, register MI_INFO *info,
 
   info->update= (short) (HA_STATE_CHANGED | HA_STATE_ROW_CHANGED);
 
-  /*
-    Clear all keys. Note that all key blocks allocated until now remain
-    "dead" parts of the key file. (Bug #4692)
-  */
-  for (i=0 ; i < info->s->base.keys ; i++)
-    share->state.key_root[i]= HA_OFFSET_ERROR;
-
-  /* Drop the delete chain. */
-  for (i=0 ; i < share->state.header.max_block_size ; i++)
-    share->state.key_del[i]= HA_OFFSET_ERROR;
-
-  /*
-    If requested, activate (enable) all keys in key_map. In this case,
-    all indexes will be (re-)built.
-  */
+  /* This function always recreates all enabled indexes. */
   if (param->testflag & T_CREATE_MISSING_KEYS)
     mi_set_all_keys_active(share->state.key_map, share->base.keys);
-
-  info->state->key_file_length=share->base.keystart;
+  mi_drop_all_indexes(param, info, TRUE);
 
   lock_memory(param);			/* Everything is alloced */
 
@@ -1636,7 +1752,8 @@ err:
   }
   my_free(mi_get_rec_buff_ptr(info, sort_param.rec_buff),
           MYF(MY_ALLOW_ZERO_PTR));
-  my_free(sort_param.record,MYF(MY_ALLOW_ZERO_PTR));
+  my_free(mi_get_rec_buff_ptr(info, sort_param.record),
+          MYF(MY_ALLOW_ZERO_PTR));
   my_free(sort_info.buff,MYF(MY_ALLOW_ZERO_PTR));
   VOID(end_io_cache(&param->read_cache));
   info->opt_flag&= ~(READ_CACHE_USED | WRITE_CACHE_USED);
@@ -2105,8 +2222,9 @@ int mi_repair_by_sort(MI_CHECK *param, register MI_INFO *info,
   ulong *rec_per_key_part;
   char llbuff[22];
   SORT_INFO sort_info;
-  ulonglong key_map=share->state.key_map;
+  ulonglong key_map;
   DBUG_ENTER("mi_repair_by_sort");
+  LINT_INIT(key_map);
 
   start_records=info->state->records;
   got_error=1;
@@ -2142,8 +2260,7 @@ int mi_repair_by_sort(MI_CHECK *param, register MI_INFO *info,
   info->opt_flag|=WRITE_CACHE_USED;
   info->rec_cache.file=info->dfile;		/* for sort_delete_record */
 
-  if (!(sort_param.record=(byte*) my_malloc((uint) share->base.pack_reclength,
-                                            MYF(0))) ||
+  if (!mi_alloc_rec_buff(info, -1, &sort_param.record) ||
       !mi_alloc_rec_buff(info, -1, &sort_param.rec_buff))
   {
     mi_check_print_error(param, "Not enough memory for extra record");
@@ -2179,25 +2296,14 @@ int mi_repair_by_sort(MI_CHECK *param, register MI_INFO *info,
   }
 
   info->update= (short) (HA_STATE_CHANGED | HA_STATE_ROW_CHANGED);
-  if (!(param->testflag & T_CREATE_MISSING_KEYS))
-  {
-    /*
-      Flush key cache for this file if we are calling this outside
-      myisamchk
-    */
-    flush_key_blocks(share->key_cache,share->kfile, FLUSH_IGNORE_CHANGED);
-    /* Clear the pointers to the given rows */
-    for (i=0 ; i < share->base.keys ; i++)
-      share->state.key_root[i]= HA_OFFSET_ERROR;
-    for (i=0 ; i < share->state.header.max_block_size ; i++)
-      share->state.key_del[i]= HA_OFFSET_ERROR;
-    info->state->key_file_length=share->base.keystart;
-  }
-  else
+
+  /* Optionally drop indexes and optionally modify the key_map. */
+  mi_drop_all_indexes(param, info, FALSE);
+  key_map= share->state.key_map;
+  if (param->testflag & T_CREATE_MISSING_KEYS)
   {
-    if (flush_key_blocks(share->key_cache,share->kfile, FLUSH_FORCE_WRITE))
-      goto err;
-    key_map= ~key_map;			/* Create the missing keys */
+    /* Invert the copied key_map to recreate all disabled indexes. */
+    key_map= ~key_map;
   }
 
   sort_info.info=info;
@@ -2240,6 +2346,10 @@ int mi_repair_by_sort(MI_CHECK *param, register MI_INFO *info,
     sort_param.read_cache=param->read_cache;
     sort_param.keyinfo=share->keyinfo+sort_param.key;
     sort_param.seg=sort_param.keyinfo->seg;
+    /*
+      Skip this index if it is marked disabled in the copied
+      (and possibly inverted) key_map.
+    */
     if (! mi_is_key_active(key_map, sort_param.key))
     {
       /* Remember old statistics for key */
@@ -2247,6 +2357,8 @@ int mi_repair_by_sort(MI_CHECK *param, register MI_INFO *info,
              (char*) (share->state.rec_per_key_part +
                       (uint) (rec_per_key_part - param->rec_per_key_part)),
              sort_param.keyinfo->keysegs*sizeof(*rec_per_key_part));
+      DBUG_PRINT("repair", ("skipping seemingly disabled index #: %u",
+                            sort_param.key));
       continue;
     }
 
@@ -2302,8 +2414,11 @@ int mi_repair_by_sort(MI_CHECK *param, register MI_INFO *info,
     if (param->testflag & T_STATISTICS)
       update_key_parts(sort_param.keyinfo, rec_per_key_part, sort_param.unique,
                        param->stats_method == MI_STATS_METHOD_IGNORE_NULLS?
-                       sort_param.notnull: NULL,(ulonglong) info->state->records);
+                       sort_param.notnull: NULL,
+                       (ulonglong) info->state->records);
+    /* Enable this index in the permanent (not the copied) key_map. */
     mi_set_key_active(share->state.key_map, sort_param.key);
+    DBUG_PRINT("repair", ("set enabled index #: %u", sort_param.key));
 
     if (sort_param.fix_datafile)
     {
@@ -2428,7 +2543,8 @@ err:
 
   my_free(mi_get_rec_buff_ptr(info, sort_param.rec_buff),
           MYF(MY_ALLOW_ZERO_PTR));
-  my_free(sort_param.record,MYF(MY_ALLOW_ZERO_PTR));
+  my_free(mi_get_rec_buff_ptr(info, sort_param.record),
+          MYF(MY_ALLOW_ZERO_PTR));
   my_free((gptr) sort_info.key_block,MYF(MY_ALLOW_ZERO_PTR));
   my_free((gptr) sort_info.ft_buf, MYF(MY_ALLOW_ZERO_PTR));
   my_free(sort_info.buff,MYF(MY_ALLOW_ZERO_PTR));
@@ -2504,9 +2620,11 @@ int mi_repair_parallel(MI_CHECK *param, register MI_INFO *info,
   IO_CACHE new_data_cache; /* For non-quick repair. */
   IO_CACHE_SHARE io_share;
   SORT_INFO sort_info;
-  ulonglong key_map=share->state.key_map;
+  ulonglong key_map;
   pthread_attr_t thr_attr;
+  ulong max_pack_reclength;
   DBUG_ENTER("mi_repair_parallel");
+  LINT_INIT(key_map);
 
   start_records=info->state->records;
   got_error=1;
@@ -2608,25 +2726,14 @@ int mi_repair_parallel(MI_CHECK *param, register MI_INFO *info,
   }
 
   info->update= (short) (HA_STATE_CHANGED | HA_STATE_ROW_CHANGED);
-  if (!(param->testflag & T_CREATE_MISSING_KEYS))
-  {
-    /*
-      Flush key cache for this file if we are calling this outside
-      myisamchk
-    */
-    flush_key_blocks(share->key_cache,share->kfile, FLUSH_IGNORE_CHANGED);
-    /* Clear the pointers to the given rows */
-    for (i=0 ; i < share->base.keys ; i++)
-      share->state.key_root[i]= HA_OFFSET_ERROR;
-    for (i=0 ; i < share->state.header.max_block_size ; i++)
-      share->state.key_del[i]= HA_OFFSET_ERROR;
-    info->state->key_file_length=share->base.keystart;
-  }
-  else
+
+  /* Optionally drop indexes and optionally modify the key_map. */
+  mi_drop_all_indexes(param, info, FALSE);
+  key_map= share->state.key_map;
+  if (param->testflag & T_CREATE_MISSING_KEYS)
   {
-    if (flush_key_blocks(share->key_cache,share->kfile, FLUSH_FORCE_WRITE))
-      goto err;
-    key_map= ~key_map;			/* Create the missing keys */
+    /* Invert the copied key_map to recreate all disabled indexes. */
+    key_map= ~key_map;
   }
 
   sort_info.info=info;
@@ -2662,10 +2769,13 @@ int mi_repair_parallel(MI_CHECK *param, register MI_INFO *info,
 
   del=info->state->del;
   param->glob_crc=0;
-
+  /* for compressed tables */
+  max_pack_reclength= share->base.pack_reclength;
+  if (share->options & HA_OPTION_COMPRESS_RECORD)
+    set_if_bigger(max_pack_reclength, share->max_pack_length);
   if (!(sort_param=(MI_SORT_PARAM *)
         my_malloc((uint) share->base.keys *
-                  (sizeof(MI_SORT_PARAM) + share->base.pack_reclength),
+                  (sizeof(MI_SORT_PARAM) + max_pack_reclength),
                   MYF(MY_ZEROFILL))))
   {
     mi_check_print_error(param,"Not enough memory for key!");
@@ -2682,6 +2792,10 @@ int mi_repair_parallel(MI_CHECK *param, register MI_INFO *info,
     sort_param[i].key=key;
     sort_param[i].keyinfo=share->keyinfo+key;
     sort_param[i].seg=sort_param[i].keyinfo->seg;
+    /*
+      Skip this index if it is marked disabled in the copied
+      (and possibly inverted) key_map.
+    */
     if (! mi_is_key_active(key_map, key))
    {
       /* Remember old statistics for key */
@@ -2717,7 +2831,7 @@ int mi_repair_parallel(MI_CHECK *param, register MI_INFO *info,
 
     sort_param[i].max_pos=sort_param[i].pos=share->pack.header_length;
     sort_param[i].record= (((char *)(sort_param+share->base.keys))+
-                           (share->base.pack_reclength * i));
+                           (max_pack_reclength * i));
     if (!mi_alloc_rec_buff(info, -1, &sort_param[i].rec_buff))
     {
       mi_check_print_error(param,"Not enough memory!");
@@ -4333,7 +4447,7 @@ err:
 void update_auto_increment_key(MI_CHECK *param, MI_INFO *info,
 				my_bool repair_only)
 {
-  byte *record;
+  byte *record= 0;
   DBUG_ENTER("update_auto_increment_key");
 
   if (!info->s->base.auto_key ||
@@ -4352,8 +4466,7 @@ void update_auto_increment_key(MI_CHECK *param, MI_INFO *info,
     We have to use an allocated buffer instead of info->rec_buff as
     _mi_put_key_in_record() may use info->rec_buff
   */
-  if (!(record= (byte*) my_malloc((uint) info->s->base.pack_reclength,
-                                  MYF(0))))
+  if (!mi_alloc_rec_buff(info, -1, &record))
  {
     mi_check_print_error(param,"Not enough memory for extra record");
     DBUG_VOID_RETURN;
@@ -4365,7 +4478,7 @@ void update_auto_increment_key(MI_CHECK *param, MI_INFO *info,
   if (my_errno != HA_ERR_END_OF_FILE)
   {
     mi_extra(info,HA_EXTRA_NO_KEYREAD,0);
-    my_free((char*) record, MYF(0));
+    my_free(mi_get_rec_buff_ptr(info, record), MYF(0));
     mi_check_print_error(param,"%d when reading last record",my_errno);
     DBUG_VOID_RETURN;
   }
@@ -4380,7 +4493,7 @@ void update_auto_increment_key(MI_CHECK *param, MI_INFO *info,
     set_if_bigger(info->s->state.auto_increment, param->auto_increment_value);
   }
   mi_extra(info,HA_EXTRA_NO_KEYREAD,0);
-  my_free((char*) record, MYF(0));
+  my_free(mi_get_rec_buff_ptr(info, record), MYF(0));
   update_state_info(param, info, UPDATE_AUTO_INC);
   DBUG_VOID_RETURN;
 }
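
A note on the key_map handling in the two repair hunks above: mi_repair_by_sort() and mi_repair_parallel() now copy share->state.key_map into a local key_map after calling mi_drop_all_indexes(), and when T_CREATE_MISSING_KEYS is set they invert the copy, so exactly the formerly disabled indexes look "active" in the copy and get rebuilt, while each rebuilt index is re-enabled in the permanent map with mi_set_key_active(). The following is a minimal standalone sketch of that copy-and-invert logic, not MyISAM code; it assumes key_map is a plain bitmask with bit i set when index i is enabled, and the helper functions and flag value below are simplified stand-ins for the real mi_*_key_active() macros and the real T_CREATE_MISSING_KEYS flag.

#include <stdio.h>
#include <stdint.h>

/* Stand-in for the real repair flag; the actual value lives in MyISAM headers. */
#define T_CREATE_MISSING_KEYS (1U << 0)

/* Simplified stand-ins for mi_is_key_active() / mi_set_key_active(). */
static int key_is_active(uint64_t map, unsigned i)    { return (int) ((map >> i) & 1); }
static void key_set_active(uint64_t *map, unsigned i) { *map |= (uint64_t) 1 << i; }

int main(void)
{
  unsigned keys= 4;                /* number of indexes in the table              */
  uint64_t state_key_map= 0x9;     /* bits 0 and 3 enabled, 1 and 2 disabled      */
  unsigned testflag= T_CREATE_MISSING_KEYS;

  /*
    Work on a copy, as the repair functions do. With T_CREATE_MISSING_KEYS
    the copy is inverted, so only the indexes that are disabled in the
    permanent map look "active" in the copy and are selected for rebuilding.
  */
  uint64_t key_map= state_key_map;
  if (testflag & T_CREATE_MISSING_KEYS)
    key_map= ~key_map;

  for (unsigned i= 0; i < keys; i++)
  {
    if (!key_is_active(key_map, i))
    {
      printf("index %u: skipped (seemingly disabled)\n", i);
      continue;
    }
    printf("index %u: rebuilt\n", i);
    /* Enable the rebuilt index in the permanent map, not in the copy. */
    key_set_active(&state_key_map, i);
  }
  printf("final key_map: 0x%llx\n", (unsigned long long) state_key_map);
  return 0;
}

With the sample map 0x9, indexes 1 and 2 are rebuilt and the permanent map ends up as 0xf, which mirrors the "seemingly disabled" / "set enabled index" DBUG_PRINT pairs in the patch.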
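The hunks that replace my_malloc()/my_free() of record buffers follow one pattern: the buffer is obtained with mi_alloc_rec_buff(info, -1, &record), and the pointer handed to my_free() is recovered with mi_get_rec_buff_ptr(info, record) instead of freeing the record pointer directly. The exact buffer layout used by those MyISAM functions is not part of this diff; the sketch below only illustrates the general idea of an allocator that returns an interior pointer plus a companion that recovers the base address for freeing. All names in it (rec_buff_alloc, rec_buff_base, HEADER_SIZE) are hypothetical.

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

/* Hypothetical header kept in front of the visible record area. */
enum { HEADER_SIZE = 16 };

static char *rec_buff_alloc(size_t record_length)
{
  char *base= malloc(HEADER_SIZE + record_length);
  if (!base)
    return NULL;
  memcpy(base, &record_length, sizeof(record_length)); /* stash bookkeeping */
  return base + HEADER_SIZE;            /* caller only sees the record area */
}

static char *rec_buff_base(char *record)
{
  return record ? record - HEADER_SIZE : NULL;   /* what free() really needs */
}

int main(void)
{
  char *record= rec_buff_alloc(128);
  if (!record)
  {
    fprintf(stderr, "Not enough memory for record\n");
    return 1;
  }
  memset(record, 0, 128);
  /* Freeing the record pointer itself would be wrong; free the base. */
  free(rec_buff_base(record));
  return 0;
}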
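In mi_repair_parallel(), the per-index record buffers are carved out of the same zero-filled allocation as the MI_SORT_PARAM array, and for compressed tables the slot size is raised from pack_reclength to max_pack_length via set_if_bigger(). Below is a small sketch of that layout with a hypothetical stand-in struct instead of MI_SORT_PARAM and calloc() instead of my_malloc(MY_ZEROFILL); only the sizing and pointer arithmetic mirror the patch.

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

/* Hypothetical stand-in for MI_SORT_PARAM. */
struct sort_param_stub { char *record; unsigned key; };

int main(void)
{
  unsigned keys= 3;
  size_t pack_reclength= 100;
  size_t max_pack_length= 160;          /* larger for a compressed table */

  /* Equivalent of: set_if_bigger(max_pack_reclength, share->max_pack_length) */
  size_t max_pack_reclength= pack_reclength;
  if (max_pack_length > max_pack_reclength)
    max_pack_reclength= max_pack_length;

  /* One allocation: keys structs followed by keys record buffers. */
  struct sort_param_stub *sort_param=
    calloc(keys, sizeof(struct sort_param_stub) + max_pack_reclength);
  if (!sort_param)
    return 1;

  for (unsigned i= 0; i < keys; i++)
  {
    sort_param[i].key= i;
    /* Record buffers start right after the struct array. */
    sort_param[i].record= ((char *) (sort_param + keys)) +
                          (max_pack_reclength * i);
    memset(sort_param[i].record, 0, max_pack_reclength);
  }
  printf("allocated %u buffers of %zu bytes each\n", keys, max_pack_reclength);
  free(sort_param);
  return 0;
}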