author     unknown <igor@rurik.mysql.com>   2003-06-30 09:41:41 -0700
committer  unknown <igor@rurik.mysql.com>   2003-06-30 09:41:41 -0700
commit     80c1bba3a90e36dbcbc4ec35171f21508759e99b (patch)
tree       44574656fb0c8810963e2afccae4c1d98fc12d0b
parent     0912871d6aeefe61dae1370ab9a1493341b9b4fc (diff)
download   mariadb-git-80c1bba3a90e36dbcbc4ec35171f21508759e99b.tar.gz
Many files:
Added multiple key cache
myisam/mi_page.c:
Added multiple key cache
myisam/mi_delete_all.c:
Added multiple key cache
myisam/mi_extra.c:
Added multiple key cache
myisam/mi_locking.c:
Added multiple key cache
myisam/mi_panic.c:
Added multiple key cache
myisam/mi_preload.c:
Added multiple key cache
myisam/mi_check.c:
Added multiple key cache
myisam/myisamchk.c:
Added multiple key cache
myisam/myisamlog.c:
Added multiple key cache
myisam/mi_close.c:
Added multiple key cache
myisam/mi_test1.c:
Added multiple key cache
myisam/mi_test2.c:
Added multiple key cache
myisam/mi_test3.c:
Added multiple key cache
isam/_page.c:
Added multiple key cache
isam/_locking.c:
Added multiple key cache
isam/panic.c:
Added multiple key cache
isam/close.c:
Added multiple key cache
isam/isamchk.c:
Added multiple key cache
isam/test2.c:
Added multiple key cache
isam/test3.c:
Added multiple key cache
mysys/mf_keycache.c:
Added multiple key cache
include/my_sys.h:
Added multiple key cache
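
Each per-file entry above boils down to the same mechanical change: call sites that previously used the single, implicit global key cache now name the cache explicitly as the first argument. A minimal before/after sketch of that pattern, modelled on the mi_locking.c hunk further down (the surrounding names are those of the real call site):

/* Before this patch: one implicit, process-wide key cache */
if (flush_key_blocks(share->kfile, FLUSH_KEEP))
  error= my_errno;

/* After this patch: the cache is passed explicitly; existing callers
   hand in the default cache exported by mf_keycache.c as dflt_keycache */
if (flush_key_blocks(dflt_keycache, share->kfile, FLUSH_KEEP))
  error= my_errno;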
-rw-r--r--  include/my_sys.h        |  21
-rw-r--r--  isam/_locking.c         |   4
-rw-r--r--  isam/_page.c            |  22
-rw-r--r--  isam/close.c            |   3
-rw-r--r--  isam/isamchk.c          |  11
-rw-r--r--  isam/panic.c            |   2
-rw-r--r--  isam/test2.c            |   4
-rw-r--r--  isam/test3.c            |   2
-rw-r--r--  myisam/mi_check.c       |  23
-rw-r--r--  myisam/mi_close.c       |   2
-rw-r--r--  myisam/mi_delete_all.c  |   2
-rw-r--r--  myisam/mi_extra.c       |   4
-rw-r--r--  myisam/mi_locking.c     |   5
-rw-r--r--  myisam/mi_page.c        |  12
-rw-r--r--  myisam/mi_panic.c       |   2
-rw-r--r--  myisam/mi_preload.c     |   8
-rw-r--r--  myisam/mi_test1.c       |   2
-rw-r--r--  myisam/mi_test2.c       |  11
-rw-r--r--  myisam/mi_test3.c       |   2
-rw-r--r--  myisam/myisamchk.c      |   5
-rw-r--r--  myisam/myisamlog.c      |   6
-rw-r--r--  mysys/mf_keycache.c     | 886
22 files changed, 573 insertions, 466 deletions
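
For context before the diff itself, here is a hedged usage sketch of the key cache API as declared in the include/my_sys.h hunk below. Every operation now takes an opaque cache handle (a void*), and init_key_cache()/end_key_cache() take the address of that handle, so a caller can create and destroy caches other than dflt_keycache. The preload_index() wrapper, the file descriptor, and the sizes are illustrative only; inside this source tree the sketch would be compiled against the mysys library.

#include <my_global.h>
#include <my_sys.h>

/* Illustrative helper: push one index page through a private key cache. */
static int preload_index(File kfile, my_off_t pos, uint block_length)
{
  void *my_keycache= NULL;              /* a cache separate from dflt_keycache */
  byte  buff[1024];
  int   error= 1;

  /* init_key_cache() now receives the handle and the block size;
     it returns the number of blocks allocated, 0 on failure */
  if (!init_key_cache(&my_keycache, dflt_key_block_size, 256*1024L))
    return 1;

  /* Reads and writes name the cache they go through */
  if (!key_cache_read(my_keycache, kfile, pos, buff, sizeof(buff),
                      block_length, 0))
    goto err;
  if (key_cache_write(my_keycache, kfile, pos, buff, sizeof(buff),
                      block_length, 1))
    goto err;
  if (flush_key_blocks(my_keycache, kfile, FLUSH_KEEP))
    goto err;
  error= 0;

err:
  /* cleanup=1 destroys the cache mutex and frees the descriptor itself */
  end_key_cache(&my_keycache, 1);
  return error;
}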
diff --git a/include/my_sys.h b/include/my_sys.h index 48ebdc22f37..7f4d0e0e62f 100644 --- a/include/my_sys.h +++ b/include/my_sys.h @@ -248,6 +248,8 @@ extern my_bool NEAR my_disable_locking,NEAR my_disable_async_io, extern char wild_many,wild_one,wild_prefix; extern const char *charsets_dir; extern char *defaults_extra_file; +extern void *dflt_keycache; +#define dflt_key_block_size DEFAULT_KEYCACHE_BLOCK_SIZE typedef struct wild_file_pack /* Struct to hold info when selecting files */ { @@ -639,16 +641,21 @@ extern int flush_write_cache(RECORD_CACHE *info); extern long my_clock(void); extern sig_handler sigtstp_handler(int signal_number); extern void handle_recived_signals(void); -extern int init_key_cache(ulong use_mem); -extern int resize_key_cache(ulong use_mem); -extern byte *key_cache_read(File file,my_off_t filepos,byte* buff,uint length, +extern int init_key_cache(void **pkeycache,uint key_cache_block_size, + ulong use_mem); +extern int resize_key_cache(void **pkeycache,ulong use_mem); +extern byte *key_cache_read(void *pkeycache, + File file,my_off_t filepos,byte* buff,uint length, uint block_length,int return_buffer); -extern int key_cache_insert(File file, my_off_t filepos, +extern int key_cache_insert(void *pkeycache, + File file, my_off_t filepos, byte *buff, uint length); -extern int key_cache_write(File file,my_off_t filepos,byte* buff,uint length, +extern int key_cache_write(void *pkeycache, + File file,my_off_t filepos,byte* buff,uint length, uint block_length,int force_write); -extern int flush_key_blocks(int file, enum flush_type type); -extern void end_key_cache(void); +extern int flush_key_blocks(void *pkeycache, + int file, enum flush_type type); +extern void end_key_cache(void **pkeycache,my_bool cleanup); extern sig_handler my_set_alarm_variable(int signo); extern void my_string_ptr_sort(void *base,uint items,size_s size); extern void radixsort_for_str_ptr(uchar* base[], uint number_of_elements, diff --git a/isam/_locking.c b/isam/_locking.c index be9741a4237..3964f7e8ddf 100644 --- a/isam/_locking.c +++ b/isam/_locking.c @@ -50,7 +50,7 @@ int nisam_lock_database(N_INFO *info, int lock_type) else count= --share->w_locks; if (info->lock_type == F_WRLCK && !share->w_locks && - flush_key_blocks(share->kfile,FLUSH_KEEP)) + flush_key_blocks(dflt_keycache,share->kfile,FLUSH_KEEP)) error=my_errno; if (info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED)) if (end_io_cache(&info->rec_cache)) @@ -329,7 +329,7 @@ int _nisam_test_if_changed(register N_INFO *info) share->state.uniq != info->last_uniq) { /* Keyfile has changed */ if (share->state.process != share->this_process) - VOID(flush_key_blocks(share->kfile,FLUSH_RELEASE)); + VOID(flush_key_blocks(dflt_keycache,share->kfile,FLUSH_RELEASE)); share->last_process=share->state.process; info->last_loop= share->state.loop; info->last_uniq= share->state.uniq; diff --git a/isam/_page.c b/isam/_page.c index 65733d66b77..fc150cf14bd 100644 --- a/isam/_page.c +++ b/isam/_page.c @@ -27,10 +27,11 @@ uchar *_nisam_fetch_keypage(register N_INFO *info, N_KEYDEF *keyinfo, my_off_t page, uchar *buff, int return_buffer) { uchar *tmp; - tmp=(uchar*) key_cache_read(info->s->kfile,page,(byte*) buff, - (uint) keyinfo->base.block_length, - (uint) keyinfo->base.block_length, - return_buffer); + tmp=(uchar*) key_cache_read(dflt_keycache, + info->s->kfile,page,(byte*) buff, + (uint) keyinfo->base.block_length, + (uint) keyinfo->base.block_length, + return_buffer); if (tmp == info->buff) { info->update|=HA_STATE_BUFF_SAVED; @@ -83,9 +84,10 @@ int 
_nisam_write_keypage(register N_INFO *info, register N_KEYDEF *keyinfo, length=keyinfo->base.block_length; } #endif - return (key_cache_write(info->s->kfile,page,(byte*) buff,length, - (uint) keyinfo->base.block_length, - (int) (info->lock_type != F_UNLCK))); + return (key_cache_write(dflt_keycache, + info->s->kfile,page,(byte*) buff,length, + (uint) keyinfo->base.block_length, + (int) (info->lock_type != F_UNLCK))); } /* nisam_write_keypage */ @@ -99,7 +101,8 @@ int _nisam_dispose(register N_INFO *info, N_KEYDEF *keyinfo, my_off_t pos) old_link=info->s->state.key_del[keynr]; info->s->state.key_del[keynr]=(ulong) pos; - DBUG_RETURN(key_cache_write(info->s->kfile,pos,(byte*) &old_link, + DBUG_RETURN(key_cache_write(dflt_keycache, + info->s->kfile,pos,(byte*) &old_link, sizeof(long), (uint) keyinfo->base.block_length, (int) (info->lock_type != F_UNLCK))); @@ -126,7 +129,8 @@ ulong _nisam_new(register N_INFO *info, N_KEYDEF *keyinfo) } else { - if (!key_cache_read(info->s->kfile,pos, + if (!key_cache_read(dflt_keycache, + info->s->kfile,pos, (byte*) &info->s->state.key_del[keynr], (uint) sizeof(long), (uint) keyinfo->base.block_length,0)) diff --git a/isam/close.c b/isam/close.c index f1465990100..075d67d60f5 100644 --- a/isam/close.c +++ b/isam/close.c @@ -56,7 +56,8 @@ int nisam_close(register N_INFO *info) if (flag) { - if (share->kfile >= 0 && flush_key_blocks(share->kfile,FLUSH_RELEASE)) + if (share->kfile >= 0 && + flush_key_blocks(dflt_keycache,share->kfile,FLUSH_RELEASE)) error=my_errno; if (share->kfile >= 0 && my_close(share->kfile,MYF(0))) error = my_errno; diff --git a/isam/isamchk.c b/isam/isamchk.c index dc772290e13..d3db440bf87 100644 --- a/isam/isamchk.c +++ b/isam/isamchk.c @@ -516,7 +516,8 @@ static int nisamchk(my_string filename) if (!rep_quick) { if (testflag & T_EXTEND) - VOID(init_key_cache(use_buffers)); + VOID(init_key_cache(&dflt_keycache,dflt_key_block_size, + use_buffers)); VOID(init_io_cache(&read_cache,datafile,(uint) read_buffer_length, READ_CACHE,share->pack.header_length,1, MYF(MY_WME))); @@ -1459,7 +1460,7 @@ my_string name; printf("Data records: %lu\n",(ulong) share->state.records); } - VOID(init_key_cache(use_buffers)); + VOID(init_key_cache(&dflt_keycache,dflt_key_block_size,use_buffers)); if (init_io_cache(&read_cache,info->dfile,(uint) read_buffer_length, READ_CACHE,share->pack.header_length,1,MYF(MY_WME))) goto err; @@ -1887,12 +1888,12 @@ static void lock_memory(void) static int flush_blocks(file) File file; { - if (flush_key_blocks(file,FLUSH_RELEASE)) + if (flush_key_blocks(dflt_keycache,file,FLUSH_RELEASE)) { print_error("%d when trying to write bufferts",my_errno); return(1); } - end_key_cache(); + end_key_cache(&dflt_keycache,1); return 0; } /* flush_blocks */ @@ -1936,7 +1937,7 @@ int write_info; if (share->state.key_root[sort_key] == NI_POS_ERROR) DBUG_RETURN(0); /* Nothing to do */ - init_key_cache(use_buffers); + init_key_cache(&dflt_keycache,dflt_key_block_size,use_buffers); if (init_io_cache(&info->rec_cache,-1,(uint) write_buffer_length, WRITE_CACHE,share->pack.header_length,1, MYF(MY_WME | MY_WAIT_IF_FULL))) diff --git a/isam/panic.c b/isam/panic.c index e51e83671df..53e8762afd3 100644 --- a/isam/panic.c +++ b/isam/panic.c @@ -48,7 +48,7 @@ int nisam_panic(enum ha_panic_function flag) if (info->s->base.options & HA_OPTION_READ_ONLY_DATA) break; #endif - if (flush_key_blocks(info->s->kfile,FLUSH_RELEASE)) + if (flush_key_blocks(dflt_keycache,info->s->kfile,FLUSH_RELEASE)) error=my_errno; if (info->opt_flag & WRITE_CACHE_USED) if 
(flush_io_cache(&info->rec_cache)) diff --git a/isam/test2.c b/isam/test2.c index c8f97ccdefa..68d2ee7a82a 100644 --- a/isam/test2.c +++ b/isam/test2.c @@ -156,7 +156,7 @@ int main(int argc, char *argv[]) goto err; printf("- Writing key:s\n"); if (key_cacheing) - init_key_cache(IO_SIZE*16); /* Use a small cache */ + init_key_cache(&dflt_keycache,dflt_key_block_size,IO_SIZE*16); /* Use a small cache */ if (locking) nisam_lock_database(file,F_WRLCK); if (write_cacheing) @@ -674,7 +674,7 @@ end: puts("Locking used"); if (use_blob) puts("blobs used"); - end_key_cache(); + end_key_cache(&dflt_keycache,1); if (blob_buffer) my_free(blob_buffer,MYF(0)); my_end(MY_CHECK_ERROR | MY_GIVE_INFO); diff --git a/isam/test3.c b/isam/test3.c index 228030f5832..b753b6d4dac 100644 --- a/isam/test3.c +++ b/isam/test3.c @@ -173,7 +173,7 @@ void start_test(int id) exit(1); } if (key_cacheing && rnd(2) == 0) - init_key_cache(65536L); + init_key_cache(&dflt_keycache,dflt_key_block_size,65536L); printf("Process %d, pid: %d\n",id,(int) getpid()); fflush(stdout); for (error=i=0 ; i < tests && !error; i++) diff --git a/myisam/mi_check.c b/myisam/mi_check.c index 019222fdf22..61eba7d44bf 100644 --- a/myisam/mi_check.c +++ b/myisam/mi_check.c @@ -230,7 +230,8 @@ static int check_k_link(MI_CHECK *param, register MI_INFO *info, uint nr) if (next_link > info->state->key_file_length || next_link & (info->s->blocksize-1)) DBUG_RETURN(1); - if (!(buff=key_cache_read(info->s->kfile, next_link, (byte*) info->buff, + if (!(buff=key_cache_read(dflt_keycache, + info->s->kfile, next_link, (byte*) info->buff, myisam_block_size, block_size, 1))) DBUG_RETURN(1); next_link=mi_sizekorr(buff); @@ -259,7 +260,8 @@ int chk_size(MI_CHECK *param, register MI_INFO *info) if (!(param->testflag & T_SILENT)) puts("- check file-size"); - flush_key_blocks(info->s->kfile, FLUSH_FORCE_WRITE); /* If called externally */ + flush_key_blocks(dflt_keycache, + info->s->kfile, FLUSH_FORCE_WRITE); /* If called externally */ size=my_seek(info->s->kfile,0L,MY_SEEK_END,MYF(0)); if ((skr=(my_off_t) info->state->key_file_length) != size) @@ -1119,7 +1121,8 @@ int mi_repair(MI_CHECK *param, register MI_INFO *info, param->testflag|=T_REP; /* for easy checking */ if (!param->using_global_keycache) - VOID(init_key_cache(param->use_buffers)); + VOID(init_key_cache(&dflt_keycache,dflt_key_block_size, + param->use_buffers)); if (init_io_cache(¶m->read_cache,info->dfile, (uint) param->read_buffer_length, @@ -1477,13 +1480,13 @@ void lock_memory(MI_CHECK *param __attribute__((unused))) int flush_blocks(MI_CHECK *param, File file) { - if (flush_key_blocks(file,FLUSH_RELEASE)) + if (flush_key_blocks(dflt_keycache,file,FLUSH_RELEASE)) { mi_check_print_error(param,"%d when trying to write bufferts",my_errno); return(1); } if (!param->using_global_keycache) - end_key_cache(); + end_key_cache(&dflt_keycache,1); return 0; } /* flush_blocks */ @@ -1537,7 +1540,7 @@ int mi_sort_index(MI_CHECK *param, register MI_INFO *info, my_string name) } /* Flush key cache for this file if we are calling this outside myisamchk */ - flush_key_blocks(share->kfile, FLUSH_IGNORE_CHANGED); + flush_key_blocks(dflt_keycache,share->kfile, FLUSH_IGNORE_CHANGED); share->state.version=(ulong) time((time_t*) 0); old_state=share->state; /* save state if not stored */ @@ -1843,7 +1846,7 @@ int mi_repair_by_sort(MI_CHECK *param, register MI_INFO *info, Flush key cache for this file if we are calling this outside myisamchk */ - flush_key_blocks(share->kfile, FLUSH_IGNORE_CHANGED); + 
flush_key_blocks(dflt_keycache,share->kfile, FLUSH_IGNORE_CHANGED); /* Clear the pointers to the given rows */ for (i=0 ; i < share->base.keys ; i++) share->state.key_root[i]= HA_OFFSET_ERROR; @@ -1853,7 +1856,7 @@ int mi_repair_by_sort(MI_CHECK *param, register MI_INFO *info, } else { - if (flush_key_blocks(share->kfile, FLUSH_FORCE_WRITE)) + if (flush_key_blocks(dflt_keycache,share->kfile, FLUSH_FORCE_WRITE)) goto err; key_map= ~key_map; /* Create the missing keys */ } @@ -2206,7 +2209,7 @@ int mi_repair_parallel(MI_CHECK *param, register MI_INFO *info, Flush key cache for this file if we are calling this outside myisamchk */ - flush_key_blocks(share->kfile, FLUSH_IGNORE_CHANGED); + flush_key_blocks(dflt_keycache,share->kfile, FLUSH_IGNORE_CHANGED); /* Clear the pointers to the given rows */ for (i=0 ; i < share->base.keys ; i++) share->state.key_root[i]= HA_OFFSET_ERROR; @@ -2216,7 +2219,7 @@ int mi_repair_parallel(MI_CHECK *param, register MI_INFO *info, } else { - if (flush_key_blocks(share->kfile, FLUSH_FORCE_WRITE)) + if (flush_key_blocks(dflt_keycache,share->kfile, FLUSH_FORCE_WRITE)) goto err; key_map= ~key_map; /* Create the missing keys */ } diff --git a/myisam/mi_close.c b/myisam/mi_close.c index dbaaebb1143..6f13af41fbd 100644 --- a/myisam/mi_close.c +++ b/myisam/mi_close.c @@ -64,7 +64,7 @@ int mi_close(register MI_INFO *info) if (flag) { if (share->kfile >= 0 && - flush_key_blocks(share->kfile, + flush_key_blocks(dflt_keycache,share->kfile, share->temporary ? FLUSH_IGNORE_CHANGED : FLUSH_RELEASE)) error=my_errno; diff --git a/myisam/mi_delete_all.c b/myisam/mi_delete_all.c index 45e56626d59..46d887a90d8 100644 --- a/myisam/mi_delete_all.c +++ b/myisam/mi_delete_all.c @@ -53,7 +53,7 @@ int mi_delete_all_rows(MI_INFO *info) If we are using delayed keys or if the user has done changes to the tables since it was locked then there may be key blocks in the key cache */ - flush_key_blocks(share->kfile, FLUSH_IGNORE_CHANGED); + flush_key_blocks(dflt_keycache, share->kfile, FLUSH_IGNORE_CHANGED); if (my_chsize(info->dfile, 0, 0, MYF(MY_WME)) || my_chsize(share->kfile, share->base.keystart, 0, MYF(MY_WME)) ) goto err; diff --git a/myisam/mi_extra.c b/myisam/mi_extra.c index 4b71f3a867b..f0d8966c9e3 100644 --- a/myisam/mi_extra.c +++ b/myisam/mi_extra.c @@ -279,7 +279,7 @@ int mi_extra(MI_INFO *info, enum ha_extra_function function, void *extra_arg) #ifdef __WIN__ /* Close the isam and data files as Win32 can't drop an open table */ pthread_mutex_lock(&share->intern_lock); - if (flush_key_blocks(share->kfile, + if (flush_key_blocks(dflt_keycache, share->kfile, (function == HA_EXTRA_FORCE_REOPEN ? 
FLUSH_RELEASE : FLUSH_IGNORE_CHANGED))) { @@ -325,7 +325,7 @@ int mi_extra(MI_INFO *info, enum ha_extra_function function, void *extra_arg) break; case HA_EXTRA_FLUSH: if (!share->temporary) - flush_key_blocks(share->kfile,FLUSH_KEEP); + flush_key_blocks(dflt_keycache,share->kfile,FLUSH_KEEP); #ifdef HAVE_PWRITE _mi_decrement_open_count(info); #endif diff --git a/myisam/mi_locking.c b/myisam/mi_locking.c index cbde05d31f5..7ede4852ccd 100644 --- a/myisam/mi_locking.c +++ b/myisam/mi_locking.c @@ -51,7 +51,8 @@ int mi_lock_database(MI_INFO *info, int lock_type) count= --share->w_locks; --share->tot_locks; if (info->lock_type == F_WRLCK && !share->w_locks && - !share->delay_key_write && flush_key_blocks(share->kfile,FLUSH_KEEP)) + !share->delay_key_write && + flush_key_blocks(dflt_keycache,share->kfile,FLUSH_KEEP)) { error=my_errno; mi_mark_crashed(info); /* Mark that table must be checked */ @@ -385,7 +386,7 @@ int _mi_test_if_changed(register MI_INFO *info) { /* Keyfile has changed */ DBUG_PRINT("info",("index file changed")); if (share->state.process != share->this_process) - VOID(flush_key_blocks(share->kfile,FLUSH_RELEASE)); + VOID(flush_key_blocks(dflt_keycache,share->kfile,FLUSH_RELEASE)); share->last_process=share->state.process; info->last_unique= share->state.unique; info->last_loop= share->state.update_count; diff --git a/myisam/mi_page.c b/myisam/mi_page.c index 49f3243e08d..eef65f03493 100644 --- a/myisam/mi_page.c +++ b/myisam/mi_page.c @@ -31,7 +31,8 @@ uchar *_mi_fetch_keypage(register MI_INFO *info, MI_KEYDEF *keyinfo, DBUG_ENTER("_mi_fetch_keypage"); DBUG_PRINT("enter",("page: %ld",page)); - tmp=(uchar*) key_cache_read(info->s->kfile,page,(byte*) buff, + tmp=(uchar*) key_cache_read(dflt_keycache, + info->s->kfile,page,(byte*) buff, (uint) keyinfo->block_length, (uint) keyinfo->block_length, return_buffer); @@ -92,7 +93,8 @@ int _mi_write_keypage(register MI_INFO *info, register MI_KEYDEF *keyinfo, length=keyinfo->block_length; } #endif - DBUG_RETURN((key_cache_write(info->s->kfile,page,(byte*) buff,length, + DBUG_RETURN((key_cache_write(dflt_keycache, + info->s->kfile,page,(byte*) buff,length, (uint) keyinfo->block_length, (int) ((info->lock_type != F_UNLCK) || info->s->delay_key_write)))); @@ -112,7 +114,8 @@ int _mi_dispose(register MI_INFO *info, MI_KEYDEF *keyinfo, my_off_t pos) info->s->state.key_del[keyinfo->block_size]=pos; mi_sizestore(buff,old_link); info->s->state.changed|= STATE_NOT_SORTED_PAGES; - DBUG_RETURN(key_cache_write(info->s->kfile,pos,buff, + DBUG_RETURN(key_cache_write(dflt_keycache, + info->s->kfile,pos,buff, sizeof(buff), (uint) keyinfo->block_length, (int) (info->lock_type != F_UNLCK))); @@ -140,7 +143,8 @@ my_off_t _mi_new(register MI_INFO *info, MI_KEYDEF *keyinfo) } else { - if (!key_cache_read(info->s->kfile,pos, + if (!key_cache_read(dflt_keycache, + info->s->kfile,pos, buff, (uint) sizeof(buff), (uint) keyinfo->block_length,0)) diff --git a/myisam/mi_panic.c b/myisam/mi_panic.c index bd0b07b097e..3a436969b06 100644 --- a/myisam/mi_panic.c +++ b/myisam/mi_panic.c @@ -48,7 +48,7 @@ int mi_panic(enum ha_panic_function flag) if (info->s->options & HA_OPTION_READ_ONLY_DATA) break; #endif - if (flush_key_blocks(info->s->kfile,FLUSH_RELEASE)) + if (flush_key_blocks(dflt_keycache,info->s->kfile,FLUSH_RELEASE)) error=my_errno; if (info->opt_flag & WRITE_CACHE_USED) if (flush_io_cache(&info->rec_cache)) diff --git a/myisam/mi_preload.c b/myisam/mi_preload.c index be45be66ecf..2cadc8f2e75 100644 --- a/myisam/mi_preload.c +++ b/myisam/mi_preload.c @@ 
-72,7 +72,7 @@ int mi_preload(MI_INFO *info, ulonglong key_map, my_bool ignore_leaves) if (!(buff= (uchar *) my_malloc(length, MYF(MY_WME)))) return (my_errno= HA_ERR_OUT_OF_MEM); - if (flush_key_blocks(share->kfile, FLUSH_RELEASE)) + if (flush_key_blocks(dflt_keycache,share->kfile, FLUSH_RELEASE)) goto err; do @@ -89,7 +89,8 @@ int mi_preload(MI_INFO *info, ulonglong key_map, my_bool ignore_leaves) { if (mi_test_if_nod(buff)) { - if (key_cache_insert(share->kfile, pos, (byte*) buff, block_length)) + if (key_cache_insert(dflt_keycache, + share->kfile, pos, (byte*) buff, block_length)) goto err; } pos+= block_length; @@ -99,7 +100,8 @@ int mi_preload(MI_INFO *info, ulonglong key_map, my_bool ignore_leaves) } else { - if (key_cache_insert(share->kfile, pos, (byte*) buff, length)) + if (key_cache_insert(dflt_keycache, + share->kfile, pos, (byte*) buff, length)) goto err; pos+= length; } diff --git a/myisam/mi_test1.c b/myisam/mi_test1.c index 5a6818b8a3e..d08066f6028 100644 --- a/myisam/mi_test1.c +++ b/myisam/mi_test1.c @@ -50,7 +50,7 @@ int main(int argc,char *argv[]) MY_INIT(argv[0]); my_init(); if (key_cacheing) - init_key_cache(IO_SIZE*16); + init_key_cache(&dflt_keycache,dflt_key_block_size,IO_SIZE*16); get_options(argc,argv); exit(run_test("test1")); diff --git a/myisam/mi_test2.c b/myisam/mi_test2.c index 8dc279b6d97..d69673c319d 100644 --- a/myisam/mi_test2.c +++ b/myisam/mi_test2.c @@ -214,7 +214,7 @@ int main(int argc, char *argv[]) if (!silent) printf("- Writing key:s\n"); if (key_cacheing) - init_key_cache(key_cache_size); /* Use a small cache */ + init_key_cache(&dflt_keycache,dflt_key_block_size,key_cache_size); /* Use a small cache */ if (locking) mi_lock_database(file,F_WRLCK); if (write_cacheing) @@ -274,7 +274,7 @@ int main(int argc, char *argv[]) goto end; } if (key_cacheing) - resize_key_cache(key_cache_size*2); + resize_key_cache(&dflt_keycache,key_cache_size*2); } if (!silent) @@ -816,16 +816,19 @@ end: puts("Locking used"); if (use_blob) puts("blobs used"); +#if 0 printf("key cache status: \n\ blocks used:%10lu\n\ w_requests: %10lu\n\ writes: %10lu\n\ r_requests: %10lu\n\ reads: %10lu\n", - my_blocks_used, my_cache_w_requests, my_cache_write, + my_blocks_used, + my_cache_w_requests, my_cache_write, my_cache_r_requests, my_cache_read); +#endif } - end_key_cache(); + end_key_cache(&dflt_keycache,1); if (blob_buffer) my_free(blob_buffer,MYF(0)); my_end(silent ? 
MY_CHECK_ERROR : MY_CHECK_ERROR | MY_GIVE_INFO); diff --git a/myisam/mi_test3.c b/myisam/mi_test3.c index 63cea4f715d..dde8758f9bf 100644 --- a/myisam/mi_test3.c +++ b/myisam/mi_test3.c @@ -177,7 +177,7 @@ void start_test(int id) exit(1); } if (key_cacheing && rnd(2) == 0) - init_key_cache(65536L); + init_key_cache(&dflt_keycache,dflt_key_block_size,65536L); printf("Process %d, pid: %d\n",id,getpid()); fflush(stdout); for (error=i=0 ; i < tests && !error; i++) diff --git a/myisam/myisamchk.c b/myisam/myisamchk.c index 72b2567278e..d2145035518 100644 --- a/myisam/myisamchk.c +++ b/myisam/myisamchk.c @@ -1020,7 +1020,8 @@ static int myisamchk(MI_CHECK *param, my_string filename) !(param->testflag & (T_FAST | T_FORCE_CREATE))) { if (param->testflag & (T_EXTEND | T_MEDIUM)) - VOID(init_key_cache(param->use_buffers)); + VOID(init_key_cache(&dflt_keycache,dflt_key_block_size, + param->use_buffers)); VOID(init_io_cache(¶m->read_cache,datafile, (uint) param->read_buffer_length, READ_CACHE, @@ -1437,7 +1438,7 @@ static int mi_sort_records(MI_CHECK *param, if (share->state.key_root[sort_key] == HA_OFFSET_ERROR) DBUG_RETURN(0); /* Nothing to do */ - init_key_cache(param->use_buffers); + init_key_cache(&dflt_keycache,dflt_key_block_size,param->use_buffers); if (init_io_cache(&info->rec_cache,-1,(uint) param->write_buffer_length, WRITE_CACHE,share->pack.header_length,1, MYF(MY_WME | MY_WAIT_IF_FULL))) diff --git a/myisam/myisamlog.c b/myisam/myisamlog.c index 4bb7cc55d30..ff1a9d30206 100644 --- a/myisam/myisamlog.c +++ b/myisam/myisamlog.c @@ -333,7 +333,7 @@ static int examine_log(my_string file_name, char **table_names) bzero((gptr) com_count,sizeof(com_count)); init_tree(&tree,0,0,sizeof(file_info),(qsort_cmp2) file_info_compare,1, (tree_element_free) file_info_free, NULL); - VOID(init_key_cache(KEY_CACHE_SIZE)); + VOID(init_key_cache(&dflt_keycache,dflt_key_block_size,KEY_CACHE_SIZE)); files_open=0; access_time=0; while (access_time++ != number_of_commands && @@ -639,7 +639,7 @@ static int examine_log(my_string file_name, char **table_names) goto end; } } - end_key_cache(); + end_key_cache(&dflt_keycache,1); delete_tree(&tree); VOID(end_io_cache(&cache)); VOID(my_close(file,MYF(0))); @@ -659,7 +659,7 @@ static int examine_log(my_string file_name, char **table_names) llstr(isamlog_filepos,llbuff))); fflush(stderr); end: - end_key_cache(); + end_key_cache(&dflt_keycache,1); delete_tree(&tree); VOID(end_io_cache(&cache)); VOID(my_close(file,MYF(0))); diff --git a/mysys/mf_keycache.c b/mysys/mf_keycache.c index 3276044fc2f..70050e70acd 100644 --- a/mysys/mf_keycache.c +++ b/mysys/mf_keycache.c @@ -138,46 +138,54 @@ typedef struct st_block_link KEYCACHE_CONDVAR *condvar; /* condition variable for 'no readers' event */ } BLOCK_LINK; -static int flush_all_key_blocks(); -static void test_key_cache(const char *where, my_bool lock); - -uint key_cache_block_size= /* size of the page buffer of a cache block */ - DEFAULT_KEYCACHE_BLOCK_SIZE; -static uint key_cache_shift; +void *dflt_keycache; +uint key_cache_block_size; /* size of the page buffer of a cache block */ +ulong my_cache_w_requests, my_cache_write, /* counters */ + my_cache_r_requests, my_cache_read; /* for statistics */ +ulong my_blocks_used, /* number of currently used blocks */ + my_blocks_changed; /* number of currently dirty blocks */ #define CHANGED_BLOCKS_HASH 128 /* must be power of 2 */ #define FLUSH_CACHE 2000 /* sort this many blocks at once */ -static KEYCACHE_WQUEUE - waiting_for_hash_link; /* queue of requests waiting for a free hash 
link */ -static KEYCACHE_WQUEUE - waiting_for_block; /* queue of requests waiting for a free block */ - -static HASH_LINK **my_hash_root; /* arr. of entries into hash table buckets */ -static uint my_hash_entries; /* max number of entries in the hash table */ -static HASH_LINK *my_hash_link_root; /* memory for hash table links */ -static int my_hash_links; /* max number of hash links */ -static int my_hash_links_used; /* number of hash links currently used */ -static HASH_LINK *my_free_hash_list; /* list of free hash links */ -static BLOCK_LINK *my_block_root; /* memory for block links */ -static int my_disk_blocks; /* max number of blocks in the cache */ -static byte HUGE_PTR *my_block_mem; /* memory for block buffers */ -static BLOCK_LINK *my_used_last; /* ptr to the last block of the LRU chain */ -ulong my_blocks_used, /* number of currently used blocks */ - my_blocks_changed; /* number of currently dirty blocks */ +typedef struct st_key_cache +{ + my_bool key_cache_inited; + uint key_cache_shift; + uint key_cache_block_size; /* size of the page buffer of a cache block */ + uint hash_entries; /* max number of entries in the hash table */ + int hash_links; /* max number of hash links */ + int hash_links_used; /* number of hash links currently used */ + int disk_blocks; /* max number of blocks in the cache */ + ulong blocks_used; /* number of currently used blocks */ + ulong blocks_changed; /* number of currently dirty blocks */ + ulong cache_w_requests; + ulong cache_write; + ulong cache_r_requests; + ulong cache_read; #if defined(KEYCACHE_DEBUG) -static -ulong my_blocks_available; /* number of blocks available in the LRU chain */ -#endif /* defined(KEYCACHE_DEBUG) */ -ulong my_cache_w_requests, my_cache_write, /* counters */ - my_cache_r_requests, my_cache_read; /* for statistics */ -static BLOCK_LINK - *changed_blocks[CHANGED_BLOCKS_HASH]; /* hash table for file dirty blocks */ -static BLOCK_LINK - *file_blocks[CHANGED_BLOCKS_HASH]; /* hash table for other file blocks */ - /* that are not free */ + ulong_blocks_available; /* number of blocks available in the LRU chain */ +#endif + HASH_LINK **hash_root; /* arr. 
of entries into hash table buckets */ + HASH_LINK *hash_link_root; /* memory for hash table links */ + HASH_LINK *free_hash_list; /* list of free hash links */ + BLOCK_LINK *block_root; /* memory for block links */ + byte HUGE_PTR *block_mem; /* memory for block buffers */ + BLOCK_LINK *used_last; /* ptr to the last block of the LRU chain */ + pthread_mutex_t thr_lock_keycache; + KEYCACHE_WQUEUE waiting_for_hash_link; /* waiting for a free hash link */ + KEYCACHE_WQUEUE waiting_for_block; /* requests waiting for a free block */ + BLOCK_LINK *changed_blocks[CHANGED_BLOCKS_HASH]; /* hash for dirty file bl.*/ + BLOCK_LINK *file_blocks[CHANGED_BLOCKS_HASH]; /* hash for other file bl.*/ +} KEY_CACHE; + +static int flush_all_key_blocks(); +static void test_key_cache(KEY_CACHE *keycache, + const char *where, my_bool lock); + #define KEYCACHE_HASH(f, pos) \ - (((ulong) ((pos) >> key_cache_shift)+(ulong) (f)) & (my_hash_entries-1)) +(((ulong) ((pos) >> keycache->key_cache_shift)+ \ + (ulong) (f)) & (keycache->hash_entries-1)) #define FILE_HASH(f) ((uint) (f) & (CHANGED_BLOCKS_HASH-1)) #define DEFAULT_KEYCACHE_DEBUG_LOG "keycache_debug.log" @@ -231,9 +239,9 @@ static long keycache_thread_id; #endif /* defined(KEYCACHE_DEBUG) || !defined(DBUG_OFF) */ #define BLOCK_NUMBER(b) \ - ((uint) (((char*)(b) - (char *) my_block_root) / sizeof(BLOCK_LINK))) + ((uint) (((char*)(b)-(char *) keycache->block_root)/sizeof(BLOCK_LINK))) #define HASH_LINK_NUMBER(h) \ - ((uint) (((char*)(h) - (char *) my_hash_link_root) / sizeof(HASH_LINK))) + ((uint) (((char*)(h)-(char *) keycache->hash_link_root)/sizeof(HASH_LINK))) #if (defined(KEYCACHE_TIMEOUT) && !defined(__WIN__)) || defined(KEYCACHE_DEBUG) static int keycache_pthread_cond_wait(pthread_cond_t *cond, @@ -271,111 +279,131 @@ static uint next_power(uint value) return number of blocks in it */ -int init_key_cache(ulong use_mem) +int init_key_cache(void **pkeycache, uint key_cache_block_size, + ulong use_mem) { uint blocks, hash_links, length; int error; + KEY_CACHE *keycache; DBUG_ENTER("init_key_cache"); + if (!*pkeycache) + { + if (!(*pkeycache= my_malloc(sizeof(KEY_CACHE), MYF(MY_ZEROFILL)))) + DBUG_RETURN(0); + } + keycache= (KEY_CACHE *) *pkeycache; + KEYCACHE_DEBUG_OPEN; - if (key_cache_inited && my_disk_blocks > 0) + if (keycache->key_cache_inited && keycache->disk_blocks > 0) { DBUG_PRINT("warning",("key cache already in use")); DBUG_RETURN(0); } - if (! key_cache_inited) + if (! 
keycache->key_cache_inited) { - key_cache_inited=TRUE; - my_disk_blocks= -1; - key_cache_shift=my_bit_log2(key_cache_block_size); + keycache->key_cache_inited= TRUE; + keycache->disk_blocks= -1; + pthread_mutex_init(&keycache->thr_lock_keycache, MY_MUTEX_INIT_FAST); + keycache->key_cache_shift= my_bit_log2(key_cache_block_size); + keycache->key_cache_block_size= key_cache_block_size; DBUG_PRINT("info",("key_cache_block_size: %u", key_cache_block_size)); } - my_cache_w_requests= my_cache_r_requests= my_cache_read= my_cache_write=0; + keycache->cache_w_requests= keycache->cache_r_requests= 0; + keycache->cache_read= keycache->cache_write=0; - my_block_mem=NULL; - my_block_root=NULL; + keycache->block_mem= NULL; + keycache->block_root= NULL; blocks= (uint) (use_mem/(sizeof(BLOCK_LINK)+2*sizeof(HASH_LINK)+ sizeof(HASH_LINK*)*5/4+key_cache_block_size)); /* It doesn't make sense to have too few blocks (less than 8) */ - if (blocks >= 8 && my_disk_blocks < 0) + if (blocks >= 8 && keycache->disk_blocks < 0) { for (;;) { /* Set my_hash_entries to the next bigger 2 power */ - if ((my_hash_entries=next_power(blocks)) < blocks*5/4) - my_hash_entries<<=1; - hash_links=2*blocks; + if ((keycache->hash_entries= next_power(blocks)) < blocks*5/4) + keycache->hash_entries<<= 1; + hash_links= 2*blocks; #if defined(MAX_THREADS) if (hash_links < MAX_THREADS + blocks - 1) hash_links=MAX_THREADS + blocks - 1; #endif while ((length=(ALIGN_SIZE(blocks*sizeof(BLOCK_LINK))+ ALIGN_SIZE(hash_links*sizeof(HASH_LINK))+ - ALIGN_SIZE(sizeof(HASH_LINK*)*my_hash_entries)))+ - ((ulong) blocks << key_cache_shift) > use_mem) + ALIGN_SIZE(sizeof(HASH_LINK*)*keycache->hash_entries)))+ + ((ulong) blocks << keycache->key_cache_shift) > use_mem) blocks--; /* Allocate memory for cache page buffers */ - if ((my_block_mem=my_malloc_lock((ulong) blocks*key_cache_block_size, - MYF(0)))) + if ((keycache->block_mem= + my_malloc_lock((ulong) blocks*keycache->key_cache_block_size, + MYF(0)))) { /* Allocate memory for blocks, hash_links and hash entries; For each block 2 hash links are allocated */ - if ((my_block_root=(BLOCK_LINK*) my_malloc((uint) length,MYF(0)))) + if ((keycache->block_root= (BLOCK_LINK*) my_malloc((uint) length, + MYF(0)))) break; - my_free_lock(my_block_mem,MYF(0)); + my_free_lock(keycache->block_mem, MYF(0)); } if (blocks < 8) { - my_errno=ENOMEM; + my_errno= ENOMEM; goto err; } - blocks=blocks/4*3; + blocks= blocks/4*3; } - my_disk_blocks=(int) blocks; - my_hash_links=hash_links; - my_hash_root= (HASH_LINK**) ((char*) my_block_root + - ALIGN_SIZE(blocks*sizeof(BLOCK_LINK))); - my_hash_link_root= (HASH_LINK*) ((char*) my_hash_root + - ALIGN_SIZE((sizeof(HASH_LINK*) * - my_hash_entries))); - bzero((byte*) my_block_root, my_disk_blocks*sizeof(BLOCK_LINK)); - bzero((byte*) my_hash_root, my_hash_entries*sizeof(HASH_LINK*)); - bzero((byte*) my_hash_link_root, my_hash_links*sizeof(HASH_LINK)); - my_hash_links_used=0; - my_free_hash_list=NULL; - my_blocks_used= my_blocks_changed=0; + keycache->disk_blocks= (int) blocks; + keycache->hash_links= hash_links; + keycache->hash_root= (HASH_LINK**) ((char*) keycache->block_root + + ALIGN_SIZE(blocks*sizeof(BLOCK_LINK))); + keycache->hash_link_root= (HASH_LINK*) ((char*) keycache->hash_root + + ALIGN_SIZE((sizeof(HASH_LINK*) * + keycache->hash_entries))); + bzero((byte*) keycache->block_root, + keycache->disk_blocks*sizeof(BLOCK_LINK)); + bzero((byte*) keycache->hash_root, + keycache->hash_entries*sizeof(HASH_LINK*)); + bzero((byte*) keycache->hash_link_root, + 
keycache->hash_links*sizeof(HASH_LINK)); + keycache->hash_links_used= 0; + keycache->free_hash_list= NULL; + keycache->blocks_used= keycache->blocks_changed= 0; #if defined(KEYCACHE_DEBUG) - my_blocks_available=0; + keycache->_blocks_available=0; #endif /* The LRU chain is empty after initialization */ - my_used_last=NULL; + keycache->used_last=NULL; - waiting_for_hash_link.last_thread=NULL; - waiting_for_block.last_thread=NULL; + keycache->waiting_for_hash_link.last_thread= NULL; + keycache->waiting_for_block.last_thread= NULL; DBUG_PRINT("exit", ("disk_blocks: %d block_root: %lx hash_entries: %d hash_root: %lx \ hash_links: %d hash_link_root %lx", - my_disk_blocks, my_block_root, my_hash_entries, my_hash_root, - my_hash_links, my_hash_link_root)); + keycache->disk_blocks, keycache->block_root, + keycache->hash_entries, keycache->hash_root, + keycache->hash_links, keycache->hash_link_root)); } - bzero((gptr) changed_blocks,sizeof(changed_blocks[0])*CHANGED_BLOCKS_HASH); - bzero((gptr) file_blocks,sizeof(file_blocks[0])*CHANGED_BLOCKS_HASH); + bzero((gptr) keycache->changed_blocks, + sizeof(keycache->changed_blocks[0])*CHANGED_BLOCKS_HASH); + bzero((gptr) keycache->file_blocks, + sizeof(keycache->file_blocks[0])*CHANGED_BLOCKS_HASH); DBUG_RETURN((int) blocks); err: - error=my_errno; - if (my_block_mem) - my_free_lock((gptr) my_block_mem,MYF(0)); - if (my_block_mem) - my_free((gptr) my_block_root,MYF(0)); - my_errno=error; + error= my_errno; + if (keycache->block_mem) + my_free_lock((gptr) keycache->block_mem, MYF(0)); + if (keycache->block_mem) + my_free((gptr) keycache->block_root,MYF(0)); + my_errno= error; DBUG_RETURN(0); } @@ -383,20 +411,23 @@ err: /* Resize the key cache */ -int resize_key_cache(ulong use_mem) +int resize_key_cache(void **pkeycache, ulong use_mem) { int blocks; - keycache_pthread_mutex_lock(&THR_LOCK_keycache); + KEY_CACHE *keycache= (KEY_CACHE *) *pkeycache; + uint key_cache_block_size= keycache->key_cache_block_size; + + keycache_pthread_mutex_lock(&keycache->thr_lock_keycache); if (flush_all_key_blocks()) { /* TODO: if this happens, we should write a warning in the log file ! 
*/ - keycache_pthread_mutex_unlock(&THR_LOCK_keycache); + keycache_pthread_mutex_unlock(&keycache->thr_lock_keycache); return 0; } - end_key_cache(); + keycache_pthread_mutex_unlock(&keycache->thr_lock_keycache); + end_key_cache(pkeycache, 0); /* the following will work even if memory is 0 */ - blocks=init_key_cache(use_mem); - keycache_pthread_mutex_unlock(&THR_LOCK_keycache); + blocks=init_key_cache(pkeycache, key_cache_block_size, use_mem); return blocks; } @@ -405,25 +436,33 @@ int resize_key_cache(ulong use_mem) Remove key_cache from memory */ -void end_key_cache(void) +void end_key_cache(void **pkeycache, my_bool cleanup) { + KEY_CACHE *keycache= (KEY_CACHE *) *pkeycache; DBUG_ENTER("end_key_cache"); - if (my_disk_blocks > 0) + if (keycache->disk_blocks > 0) { - if (my_block_mem) + if (keycache->block_mem) { - my_free_lock((gptr) my_block_mem,MYF(0)); - my_free((gptr) my_block_root,MYF(0)); + my_free_lock((gptr) keycache->block_mem, MYF(0)); + my_free((gptr) keycache->block_root, MYF(0)); } - my_disk_blocks= -1; + keycache->disk_blocks= -1; } KEYCACHE_DEBUG_CLOSE; - key_cache_inited=0; + keycache->key_cache_inited=0; DBUG_PRINT("status", ("used: %d changed: %d w_requests: %ld \ writes: %ld r_requests: %ld reads: %ld", - my_blocks_used, my_blocks_changed, my_cache_w_requests, - my_cache_write, my_cache_r_requests, my_cache_read)); + keycache->blocks_used, keycache->blocks_changed, + keycache->cache_w_requests, keycache->cache_write, + keycache->cache_r_requests, keycache->cache_read)); + if (cleanup) + { + pthread_mutex_destroy(&keycache->thr_lock_keycache); + my_free(*pkeycache, MYF(0)); + *pkeycache= NULL; + } DBUG_VOID_RETURN; } /* end_key_cache */ @@ -546,16 +585,16 @@ static inline void link_changed(BLOCK_LINK *block, BLOCK_LINK **phead) and link it to the chain of clean blocks for the specified file */ -static void link_to_file_list(BLOCK_LINK *block,int file, - my_bool unlink) +static void link_to_file_list(KEY_CACHE *keycache, + BLOCK_LINK *block, int file, my_bool unlink) { if (unlink) unlink_changed(block); - link_changed(block,&file_blocks[FILE_HASH(file)]); + link_changed(block,&keycache->file_blocks[FILE_HASH(file)]); if (block->status & BLOCK_CHANGED) { block->status&=~BLOCK_CHANGED; - my_blocks_changed--; + keycache->blocks_changed--; } } @@ -565,12 +604,14 @@ static void link_to_file_list(BLOCK_LINK *block,int file, file and link it to the chain of dirty blocks for this file */ -static inline void link_to_changed_list(BLOCK_LINK *block) +static inline void link_to_changed_list(KEY_CACHE *keycache, + BLOCK_LINK *block) { unlink_changed(block); - link_changed(block,&changed_blocks[FILE_HASH(block->hash_link->file)]); + link_changed(block, + &keycache->changed_blocks[FILE_HASH(block->hash_link->file)]); block->status|=BLOCK_CHANGED; - my_blocks_changed++; + keycache->blocks_changed++; } @@ -578,14 +619,15 @@ static inline void link_to_changed_list(BLOCK_LINK *block) Link a block to the LRU chain at the beginning or at the end */ -static void link_block(BLOCK_LINK *block, my_bool at_end) +static void link_block(KEY_CACHE *keycache, BLOCK_LINK *block, my_bool at_end) { KEYCACHE_DBUG_ASSERT(! 
(block->hash_link && block->hash_link->requests)); - if (waiting_for_block.last_thread) { + if (keycache->waiting_for_block.last_thread) { /* Signal that in the LRU chain an available block has appeared */ - struct st_my_thread_var *last_thread=waiting_for_block.last_thread; - struct st_my_thread_var *first_thread=last_thread->next; - struct st_my_thread_var *next_thread=first_thread; + struct st_my_thread_var *last_thread= + keycache->waiting_for_block.last_thread; + struct st_my_thread_var *first_thread= last_thread->next; + struct st_my_thread_var *next_thread= first_thread; HASH_LINK *hash_link= (HASH_LINK *) first_thread->opt_info; struct st_my_thread_var *thread; do @@ -599,44 +641,44 @@ static void link_block(BLOCK_LINK *block, my_bool at_end) if ((HASH_LINK *) thread->opt_info == hash_link) { keycache_pthread_cond_signal(&thread->suspend); - unlink_from_queue(&waiting_for_block, thread); + unlink_from_queue(&keycache->waiting_for_block, thread); block->requests++; } } while (thread != last_thread); - hash_link->block=block; + hash_link->block= block; KEYCACHE_THREAD_TRACE("link_block: after signaling"); #if defined(KEYCACHE_DEBUG) KEYCACHE_DBUG_PRINT("link_block", ("linked,unlinked block %u status=%x #requests=%u #available=%u", - BLOCK_NUMBER(block),block->status, - block->requests, my_blocks_available)); + BLOCK_NUMBER(block), block->status, + block->requests, blocks_available)); #endif return; } - if (my_used_last) + if (keycache->used_last) { - my_used_last->next_used->prev_used=&block->next_used; - block->next_used= my_used_last->next_used; - block->prev_used= &my_used_last->next_used; - my_used_last->next_used=block; + keycache->used_last->next_used->prev_used= &block->next_used; + block->next_used= keycache->used_last->next_used; + block->prev_used= &keycache->used_last->next_used; + keycache->used_last->next_used= block; if (at_end) - my_used_last=block; + keycache->used_last= block; } else { /* The LRU chain is empty */ - my_used_last=block->next_used=block; - block->prev_used=&block->next_used; + keycache->used_last=block->next_used= block; + block->prev_used= &block->next_used; } KEYCACHE_THREAD_TRACE("link_block"); #if defined(KEYCACHE_DEBUG) - my_blocks_available++; + keycache->blocks_available++; KEYCACHE_DBUG_PRINT("link_block", ("linked block %u:%1u status=%x #requests=%u #available=%u", BLOCK_NUMBER(block),at_end,block->status, - block->requests, my_blocks_available)); - KEYCACHE_DBUG_ASSERT(my_blocks_available <= my_blocks_used); + block->requests, keycache->blocks_available)); + KEYCACHE_DBUG_ASSERT(keycache->blocks_available <= keycache->blocks_used); #endif } @@ -645,28 +687,28 @@ static void link_block(BLOCK_LINK *block, my_bool at_end) Unlink a block from the LRU chain */ -static void unlink_block(BLOCK_LINK *block) +static void unlink_block(KEY_CACHE *keycache, BLOCK_LINK *block) { if (block->next_used == block) /* The list contains only one member */ - my_used_last=NULL; + keycache->used_last= NULL; else { - block->next_used->prev_used=block->prev_used; - *block->prev_used=block->next_used; - if (my_used_last == block) - my_used_last=STRUCT_PTR(BLOCK_LINK, next_used, block->prev_used); + block->next_used->prev_used= block->prev_used; + *block->prev_used= block->next_used; + if (keycache->used_last == block) + keycache->used_last= STRUCT_PTR(BLOCK_LINK, next_used, block->prev_used); } - block->next_used=NULL; + block->next_used= NULL; KEYCACHE_THREAD_TRACE("unlink_block"); #if defined(KEYCACHE_DEBUG) - my_blocks_available--; + keycache->blocks_available--; 
KEYCACHE_DBUG_PRINT("unlink_block", ("unlinked block %u status=%x #requests=%u #available=%u", BLOCK_NUMBER(block),block->status, - block->requests, my_blocks_available)); - KEYCACHE_DBUG_ASSERT(my_blocks_available >= 0); + block->requests, keycache->blocks_available)); + KEYCACHE_DBUG_ASSERT(keycache->blocks_available >= 0); #endif } @@ -674,11 +716,11 @@ static void unlink_block(BLOCK_LINK *block) /* Register requests for a block */ -static void reg_requests(BLOCK_LINK *block, int count) +static void reg_requests(KEY_CACHE *keycache, BLOCK_LINK *block, int count) { if (! block->requests) /* First request for the block unlinks it */ - unlink_block(block); + unlink_block(keycache, block); block->requests+=count; } @@ -688,10 +730,11 @@ static void reg_requests(BLOCK_LINK *block, int count) linking it to the LRU chain if it's the last request */ -static inline void unreg_request(BLOCK_LINK *block, int at_end) +static inline void unreg_request(KEY_CACHE *keycache, + BLOCK_LINK *block, int at_end) { if (! --block->requests) - link_block(block, (my_bool)at_end); + link_block(keycache, block, (my_bool)at_end); } /* @@ -709,13 +752,13 @@ static inline void remove_reader(BLOCK_LINK *block) Wait until the last reader of the page in block signals on its termination */ -static inline void wait_for_readers(BLOCK_LINK *block) +static inline void wait_for_readers(KEY_CACHE *keycache, BLOCK_LINK *block) { struct st_my_thread_var *thread=my_thread_var; while (block->hash_link->requests) { block->condvar=&thread->suspend; - keycache_pthread_cond_wait(&thread->suspend,&THR_LOCK_keycache); + keycache_pthread_cond_wait(&thread->suspend, &keycache->thr_lock_keycache); block->condvar=NULL; } } @@ -728,10 +771,10 @@ static inline void wait_for_readers(BLOCK_LINK *block) static inline void link_hash(HASH_LINK **start, HASH_LINK *hash_link) { if (*start) - (*start)->prev=&hash_link->next; - hash_link->next=*start; - hash_link->prev=start; - *start=hash_link; + (*start)->prev= &hash_link->next; + hash_link->next= *start; + hash_link->prev= start; + *start= hash_link; } @@ -739,31 +782,32 @@ static inline void link_hash(HASH_LINK **start, HASH_LINK *hash_link) Remove a hash link from the hash table */ -static void unlink_hash(HASH_LINK *hash_link) +static void unlink_hash(KEY_CACHE *keycache, HASH_LINK *hash_link) { KEYCACHE_DBUG_PRINT("unlink_hash", ("file %u, filepos %lu #requests=%u", (uint) hash_link->file,(ulong) hash_link->diskpos, hash_link->requests)); KEYCACHE_DBUG_ASSERT(hash_link->requests == 0); - if ((*hash_link->prev=hash_link->next)) - hash_link->next->prev=hash_link->prev; - hash_link->block=NULL; - if (waiting_for_hash_link.last_thread) + if ((*hash_link->prev= hash_link->next)) + hash_link->next->prev= hash_link->prev; + hash_link->block= NULL; + if (keycache->waiting_for_hash_link.last_thread) { /* Signal that A free hash link appeared */ - struct st_my_thread_var *last_thread=waiting_for_hash_link.last_thread; - struct st_my_thread_var *first_thread=last_thread->next; - struct st_my_thread_var *next_thread=first_thread; + struct st_my_thread_var *last_thread= + keycache->waiting_for_hash_link.last_thread; + struct st_my_thread_var *first_thread= last_thread->next; + struct st_my_thread_var *next_thread= first_thread; KEYCACHE_PAGE *first_page= (KEYCACHE_PAGE *) (first_thread->opt_info); struct st_my_thread_var *thread; - hash_link->file=first_page->file; - hash_link->diskpos=first_page->filepos; + hash_link->file= first_page->file; + hash_link->diskpos= first_page->filepos; do { KEYCACHE_PAGE 
*page; - thread=next_thread; + thread= next_thread; page= (KEYCACHE_PAGE *) thread->opt_info; - next_thread=thread->next; + next_thread= thread->next; /* We notify about the event all threads that ask for the same page as the first thread in the queue @@ -771,16 +815,17 @@ static void unlink_hash(HASH_LINK *hash_link) if (page->file == hash_link->file && page->filepos == hash_link->diskpos) { keycache_pthread_cond_signal(&thread->suspend); - unlink_from_queue(&waiting_for_hash_link, thread); + unlink_from_queue(&keycache->waiting_for_hash_link, thread); } } while (thread != last_thread); - link_hash(&my_hash_root[KEYCACHE_HASH(hash_link->file, - hash_link->diskpos)], hash_link); + link_hash(&keycache->hash_root[KEYCACHE_HASH(hash_link->file, + hash_link->diskpos)], + hash_link); return; } - hash_link->next= my_free_hash_list; - my_free_hash_list=hash_link; + hash_link->next= keycache->free_hash_list; + keycache->free_hash_list= hash_link; } @@ -788,7 +833,8 @@ static void unlink_hash(HASH_LINK *hash_link) Get the hash link for a page */ -static HASH_LINK *get_hash_link(int file, my_off_t filepos) +static HASH_LINK *get_hash_link(KEY_CACHE *keycache, + int file, my_off_t filepos) { reg1 HASH_LINK *hash_link, **start; KEYCACHE_PAGE page; @@ -805,9 +851,9 @@ restart: start contains the head of the bucket list, hash_link points to the first member of the list */ - hash_link= *(start= &my_hash_root[KEYCACHE_HASH(file, filepos)]); + hash_link= *(start= &keycache->hash_root[KEYCACHE_HASH(file, filepos)]); #if defined(KEYCACHE_DEBUG) - cnt=0; + cnt= 0; #endif /* Look for an element for the pair (file, filepos) in the bucket chain */ while (hash_link && @@ -819,42 +865,43 @@ restart: if (! (cnt <= my_hash_links_used)) { int i; - for (i=0, hash_link=*start ; - i < cnt ; i++, hash_link=hash_link->next) + for (i=0, hash_link= *start ; + i < cnt ; i++, hash_link= hash_link->next) { KEYCACHE_DBUG_PRINT("get_hash_link", ("file %u, filepos %lu", (uint) hash_link->file,(ulong) hash_link->diskpos)); } } - KEYCACHE_DBUG_ASSERT(cnt <= my_hash_links_used); + KEYCACHE_DBUG_ASSERT(cnt <= keycache->hash_links_used); #endif } if (! 
hash_link) { /* There is no hash link in the hash table for the pair (file, filepos) */ - if (my_free_hash_list) + if (keycache->free_hash_list) { - hash_link= my_free_hash_list; - my_free_hash_list=hash_link->next; + hash_link= keycache->free_hash_list; + keycache->free_hash_list=hash_link->next; } - else if (my_hash_links_used < my_hash_links) + else if (keycache->hash_links_used < keycache->hash_links) { - hash_link= &my_hash_link_root[my_hash_links_used++]; + hash_link= &keycache->hash_link_root[keycache->hash_links_used++]; } else { /* Wait for a free hash link */ - struct st_my_thread_var *thread=my_thread_var; + struct st_my_thread_var *thread= my_thread_var; KEYCACHE_DBUG_PRINT("get_hash_link", ("waiting")); page.file=file; page.filepos=filepos; thread->opt_info= (void *) &page; - link_into_queue(&waiting_for_hash_link, thread); - keycache_pthread_cond_wait(&thread->suspend,&THR_LOCK_keycache); - thread->opt_info=NULL; + link_into_queue(&keycache->waiting_for_hash_link, thread); + keycache_pthread_cond_wait(&thread->suspend, + &keycache->thr_lock_keycache); + thread->opt_info= NULL; goto restart; } - hash_link->file=file; - hash_link->diskpos=filepos; + hash_link->file= file; + hash_link->diskpos= filepos; link_hash(start, hash_link); } /* Register the request for the page */ @@ -870,12 +917,13 @@ restart: return the lru block after saving its buffer if the page is dirty */ -static BLOCK_LINK *find_key_block(int file, my_off_t filepos, +static BLOCK_LINK *find_key_block(KEY_CACHE *keycache, + int file, my_off_t filepos, int wrmode, int *page_st) { HASH_LINK *hash_link; BLOCK_LINK *block; - int error=0; + int error= 0; int page_status; DBUG_ENTER("find_key_block"); @@ -885,17 +933,18 @@ static BLOCK_LINK *find_key_block(int file, my_off_t filepos, KEYCACHE_DBUG_PRINT("find_key_block", ("file %u, filepos %lu, wrmode %lu", (uint) file,(ulong) filepos,(uint) wrmode)); #if !defined(DBUG_OFF) && defined(EXTRA_DEBUG) - DBUG_EXECUTE("check_keycache2",test_key_cache("start of find_key_block",0);); + DBUG_EXECUTE("check_keycache2", + test_key_cache(keycache, "start of find_key_block", 0);); #endif restart: /* Find the hash link for the requested page (file, filepos) */ - hash_link=get_hash_link(file, filepos); + hash_link= get_hash_link(keycache, file, filepos); - page_status=-1; - if ((block=hash_link->block) && + page_status= -1; + if ((block= hash_link->block) && block->hash_link == hash_link && (block->status & BLOCK_READ)) - page_status=PAGE_READ; + page_status= PAGE_READ; if (page_status == PAGE_READ && (block->status & BLOCK_IN_SWITCH)) { @@ -907,7 +956,7 @@ restart: all others are to be suspended, then resubmitted */ if (!wrmode && !(block->status & BLOCK_REASSIGNED)) - reg_requests(block,1); + reg_requests(keycache, block,1); else { hash_link->requests--; @@ -920,7 +969,8 @@ restart: /* Wait until the request can be resubmitted */ do { - keycache_pthread_cond_wait(&thread->suspend, &THR_LOCK_keycache); + keycache_pthread_cond_wait(&thread->suspend, + &keycache->thr_lock_keycache); } while(thread->next); } @@ -936,23 +986,24 @@ restart: if (! 
block) { /* No block is assigned for the page yet */ - if (my_blocks_used < (uint) my_disk_blocks) + if (keycache->blocks_used < (uint) keycache->disk_blocks) { /* There are some never used blocks, take first of them */ - hash_link->block=block= &my_block_root[my_blocks_used]; - block->buffer=ADD_TO_PTR(my_block_mem, - ((ulong) my_blocks_used*key_cache_block_size), - byte*); - block->status=0; - block->length=0; - block->offset=key_cache_block_size; - block->requests=1; - my_blocks_used++; - link_to_file_list(block, file, 0); - block->hash_link=hash_link; - page_status=PAGE_TO_BE_READ; + hash_link->block= block= &keycache->block_root[keycache->blocks_used]; + block->buffer= ADD_TO_PTR(keycache->block_mem, + ((ulong) keycache->blocks_used* + keycache->key_cache_block_size), + byte*); + block->status= 0; + block->length= 0; + block->offset= keycache->key_cache_block_size; + block->requests= 1; + keycache->blocks_used++; + link_to_file_list(keycache, block, file, 0); + block->hash_link= hash_link; + page_status= PAGE_TO_BE_READ; KEYCACHE_DBUG_PRINT("find_key_block", - ("got never used block %u",BLOCK_NUMBER(block))); + ("got never used block %u", BLOCK_NUMBER(block))); } else { @@ -963,28 +1014,29 @@ restart: all of them must get the same block */ - if (! my_used_last) + if (! keycache->used_last) { - struct st_my_thread_var *thread=my_thread_var; - thread->opt_info=(void *) hash_link; - link_into_queue(&waiting_for_block, thread); + struct st_my_thread_var *thread= my_thread_var; + thread->opt_info= (void *) hash_link; + link_into_queue(&keycache->waiting_for_block, thread); do { - keycache_pthread_cond_wait(&thread->suspend,&THR_LOCK_keycache); + keycache_pthread_cond_wait(&thread->suspend, + &keycache->thr_lock_keycache); } while (thread->next); - thread->opt_info=NULL; + thread->opt_info= NULL; } - block=hash_link->block; + block= hash_link->block; if (! 
block) { /* Take the first block from the LRU chain unlinking it from the chain */ - block= my_used_last->next_used; - reg_requests(block,1); - hash_link->block=block; + block= keycache->used_last->next_used; + reg_requests(keycache, block,1); + hash_link->block= block; } if (block->hash_link != hash_link && @@ -994,27 +1046,27 @@ restart: block->status|=BLOCK_IN_SWITCH; KEYCACHE_DBUG_PRINT("find_key_block", - ("got block %u for new page",BLOCK_NUMBER(block))); + ("got block %u for new page", BLOCK_NUMBER(block))); if (block->status & BLOCK_CHANGED) { /* The block contains a dirty page - push it out of the cache */ - KEYCACHE_DBUG_PRINT("find_key_block",("block is dirty")); + KEYCACHE_DBUG_PRINT("find_key_block", ("block is dirty")); - keycache_pthread_mutex_unlock(&THR_LOCK_keycache); + keycache_pthread_mutex_unlock(&keycache->thr_lock_keycache); /* The call is thread safe because only the current thread might change the block->hash_link value */ - error=my_pwrite(block->hash_link->file,block->buffer, - block->length,block->hash_link->diskpos, + error=my_pwrite(block->hash_link->file, block->buffer, + block->length, block->hash_link->diskpos, MYF(MY_NABP | MY_WAIT_IF_FULL)); - keycache_pthread_mutex_lock(&THR_LOCK_keycache); - my_cache_write++; + keycache_pthread_mutex_lock(&keycache->thr_lock_keycache); + keycache->cache_write++; } - block->status|=BLOCK_REASSIGNED; + block->status|= BLOCK_REASSIGNED; if (block->hash_link) { /* @@ -1023,20 +1075,21 @@ restart: (we could have avoided this waiting, if we had read a page in the cache in a sweep, without yielding control) */ - wait_for_readers(block); + wait_for_readers(keycache, block); /* Remove the hash link for this page from the hash table */ - unlink_hash(block->hash_link); + unlink_hash(keycache, block->hash_link); /* All pending requests for this page must be resubmitted */ if (block->wqueue[COND_FOR_SAVED].last_thread) release_queue(&block->wqueue[COND_FOR_SAVED]); } - link_to_file_list(block, file, (my_bool)(block->hash_link ? 1 : 0)); - block->status=error? BLOCK_ERROR : 0; - block->length=0; - block->offset=key_cache_block_size; - block->hash_link=hash_link; - page_status=PAGE_TO_BE_READ; + link_to_file_list(keycache, block, file, + (my_bool)(block->hash_link ? 1 : 0)); + block->status= error? BLOCK_ERROR : 0; + block->length= 0; + block->offset= keycache->key_cache_block_size; + block->hash_link= hash_link; + page_status= PAGE_TO_BE_READ; KEYCACHE_DBUG_ASSERT(block->hash_link->block == block); KEYCACHE_DBUG_ASSERT(hash_link->block->hash_link == hash_link); @@ -1044,17 +1097,17 @@ restart: else { /* This is for secondary requests for a new page only */ - page_status = block->hash_link == hash_link && - (block->status & BLOCK_READ) ? - PAGE_READ : PAGE_WAIT_TO_BE_READ; + page_status= block->hash_link == hash_link && + (block->status & BLOCK_READ) ? + PAGE_READ : PAGE_WAIT_TO_BE_READ; } } - my_cache_read++; + keycache->cache_read++; } else { - reg_requests(block,1); + reg_requests(keycache, block, 1); page_status = block->hash_link == hash_link && (block->status & BLOCK_READ) ? 
PAGE_READ : PAGE_WAIT_TO_BE_READ; @@ -1068,7 +1121,8 @@ restart: (uint) file,(ulong) filepos,(uint) page_status)); #if !defined(DBUG_OFF) && defined(EXTRA_DEBUG) - DBUG_EXECUTE("check_keycache2",test_key_cache("end of find_key_block",0);); + DBUG_EXECUTE("check_keycache2", + test_key_cache(keycache, "end of find_key_block",0);); #endif KEYCACHE_THREAD_TRACE("find_key_block:end"); DBUG_RETURN(block); @@ -1081,7 +1135,8 @@ restart: portion is less than read_length, but not less than min_length */ -static void read_block(BLOCK_LINK *block, uint read_length, +static void read_block(KEY_CACHE *keycache, + BLOCK_LINK *block, uint read_length, uint min_length, my_bool primary) { uint got_length; @@ -1100,16 +1155,16 @@ static void read_block(BLOCK_LINK *block, uint read_length, ("page to be read by primary request")); /* Page is not in buffer yet, is to be read from disk */ - keycache_pthread_mutex_unlock(&THR_LOCK_keycache); - got_length=my_pread(block->hash_link->file,block->buffer, - read_length,block->hash_link->diskpos,MYF(0)); - keycache_pthread_mutex_lock(&THR_LOCK_keycache); + keycache_pthread_mutex_unlock(&keycache->thr_lock_keycache); + got_length= my_pread(block->hash_link->file, block->buffer, + read_length, block->hash_link->diskpos, MYF(0)); + keycache_pthread_mutex_lock(&keycache->thr_lock_keycache); if (got_length < min_length) - block->status|=BLOCK_ERROR; + block->status|= BLOCK_ERROR; else { - block->status=BLOCK_READ; - block->length=got_length; + block->status= BLOCK_READ; + block->length= got_length; } KEYCACHE_DBUG_PRINT("read_block", ("primary request: new page in cache")); @@ -1128,10 +1183,11 @@ static void read_block(BLOCK_LINK *block, uint read_length, { struct st_my_thread_var *thread=my_thread_var; /* Put the request into a queue and wait until it can be processed */ - add_to_queue(&block->wqueue[COND_FOR_REQUESTED],thread); + add_to_queue(&block->wqueue[COND_FOR_REQUESTED], thread); do { - keycache_pthread_cond_wait(&thread->suspend,&THR_LOCK_keycache); + keycache_pthread_cond_wait(&thread->suspend, + &keycache->thr_lock_keycache); } while (thread->next); } @@ -1150,27 +1206,29 @@ static void read_block(BLOCK_LINK *block, uint read_length, returns adress from where data is read */ -byte *key_cache_read(File file, my_off_t filepos, byte *buff, uint length, +byte *key_cache_read(void *pkeycache, + File file, my_off_t filepos, byte *buff, uint length, uint block_length __attribute__((unused)), int return_buffer __attribute__((unused))) { int error=0; + KEY_CACHE *keycache= (KEY_CACHE *) pkeycache; DBUG_ENTER("key_cache_read"); DBUG_PRINT("enter", ("file %u, filepos %lu, length %u", (uint) file,(ulong) filepos,length)); - if (my_disk_blocks > 0) + if (keycache->disk_blocks > 0) { /* Key cache is used */ reg1 BLOCK_LINK *block; - uint offset= (uint) (filepos & (key_cache_block_size-1)); - byte *start=buff; + uint offset= (uint) (filepos & (keycache->key_cache_block_size-1)); + byte *start= buff; uint read_length; uint status; int page_st; #ifndef THREAD - if (block_length > key_cache_block_size || offset) + if (block_length > keycache->key_cache_block_size || offset) return_buffer=0; #endif @@ -1178,16 +1236,17 @@ byte *key_cache_read(File file, my_off_t filepos, byte *buff, uint length, filepos-= offset; do { - read_length= length > key_cache_block_size ? - key_cache_block_size : length; + read_length= length > keycache->key_cache_block_size ? 
+                  keycache->key_cache_block_size : length;
      KEYCACHE_DBUG_ASSERT(read_length > 0);
-     keycache_pthread_mutex_lock(&THR_LOCK_keycache);
-     my_cache_r_requests++;
-     block=find_key_block(file,filepos,0,&page_st);
+     keycache_pthread_mutex_lock(&keycache->thr_lock_keycache);
+     keycache->cache_r_requests++;
+     block=find_key_block(keycache, file, filepos, 0, &page_st);
      if (block->status != BLOCK_ERROR && page_st != PAGE_READ)
      {
        /* The requested page is to be read into the block buffer */
-       read_block(block,key_cache_block_size,read_length+offset,
+       read_block(keycache, block,
+                  keycache->key_cache_block_size, read_length+offset,
                   (my_bool)(page_st == PAGE_TO_BE_READ));
      }
      else if (! (block->status & BLOCK_ERROR) &&
@@ -1198,28 +1257,28 @@ byte *key_cache_read(File file, my_off_t filepos, byte *buff, uint length,
          this could only happen if we are using a file with
          small key blocks and are trying to read outside the file
        */
-       my_errno=-1;
-       block->status|=BLOCK_ERROR;
+       my_errno= -1;
+       block->status|= BLOCK_ERROR;
      }
-     if (! ((status=block->status) & BLOCK_ERROR))
+     if (! ((status= block->status) & BLOCK_ERROR))
      {
#ifndef THREAD
        if (! return_buffer)
#endif
        {
#if !defined(SERIALIZED_READ_FROM_CACHE)
-         keycache_pthread_mutex_unlock(&THR_LOCK_keycache);
+         keycache_pthread_mutex_unlock(&keycache->thr_lock_keycache);
#endif
          /* Copy data from the cache buffer */
          if (!(read_length & 511))
-           bmove512(buff,block->buffer+offset,read_length);
+           bmove512(buff, block->buffer+offset, read_length);
          else
-           memcpy(buff,block->buffer+offset,(size_t) read_length);
+           memcpy(buff, block->buffer+offset, (size_t) read_length);
#if !defined(SERIALIZED_READ_FROM_CACHE)
-         keycache_pthread_mutex_lock(&THR_LOCK_keycache);
+         keycache_pthread_mutex_lock(&keycache->thr_lock_keycache);
#endif
        }
      }
@@ -1229,9 +1288,9 @@ byte *key_cache_read(File file, my_off_t filepos, byte *buff, uint length,
        Link the block into the LRU chain if it's the last submitted request
        for the block
      */
-     unreg_request(block,1);
+     unreg_request(keycache, block,1);
-     keycache_pthread_mutex_unlock(&THR_LOCK_keycache);
+     keycache_pthread_mutex_unlock(&keycache->thr_lock_keycache);
      if (status & BLOCK_ERROR)
        DBUG_RETURN((byte *) 0);
@@ -1241,19 +1300,21 @@ byte *key_cache_read(File file, my_off_t filepos, byte *buff, uint length,
        return (block->buffer);
#endif
-     buff+=read_length;
-     filepos+=read_length;
-     offset=0;
+     buff+= read_length;
+     filepos+= read_length;
+     offset= 0;
    } while ((length-= read_length));
    DBUG_RETURN(start);
  }
  /* Key cache is not used */
-  statistic_increment(my_cache_r_requests,&THR_LOCK_keycache);
-  statistic_increment(my_cache_read,&THR_LOCK_keycache);
-  if (my_pread(file,(byte*) buff,length,filepos,MYF(MY_NABP)))
-    error=1;
+  statistic_increment(keycache->cache_r_requests,
+                      &keycache->thr_lock_keycache);
+  statistic_increment(keycache->cache_read,
+                      &keycache->thr_lock_keycache);
+  if (my_pread(file, (byte*) buff, length, filepos, MYF(MY_NABP)))
+    error= 1;
  DBUG_RETURN(error? (byte*) 0 : buff);
}
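The hunks above rework key_cache_read() to operate on an explicit cache handle instead of the old process-wide globals. A minimal caller-side sketch, not code from this patch: `kc`, `kfile`, `page`, `buff` and `block_length` are hypothetical caller variables, and the mysys typedefs (File, my_off_t, byte, uint) are assumed to come from the usual mysys headers.

/* Sketch only: read one full key block through a specific key cache. */
#include "my_global.h"   /* assumed include for the basic mysys typedefs      */
#include "my_sys.h"      /* assumed include for the key_cache_* declarations  */

static byte *fetch_page(void *kc, File kfile, my_off_t page,
                        byte *buff, uint block_length)
{
  /* length == block_length: ask for exactly one key block at `page` */
  return key_cache_read(kc, kfile, page, buff,
                        block_length,      /* bytes to read            */
                        block_length,      /* key block length         */
                        0);                /* copy into caller buffer  */
}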
@@ -1272,17 +1333,19 @@ byte *key_cache_read(File file, my_off_t filepos, byte *buff, uint length,
    0 if a success, 1 -otherwise.
*/
-int key_cache_insert(File file, my_off_t filepos, byte *buff, uint length)
+int key_cache_insert(void *pkeycache,
+                     File file, my_off_t filepos, byte *buff, uint length)
{
+ KEY_CACHE *keycache= (KEY_CACHE *) pkeycache;
  DBUG_ENTER("key_cache_insert");
  DBUG_PRINT("enter", ("file %u, filepos %lu, length %u",
              (uint) file,(ulong) filepos, length));
- if (my_disk_blocks > 0)
+ if (keycache->disk_blocks > 0)
  {
    /* Key cache is used */
    reg1 BLOCK_LINK *block;
-   uint offset= (uint) (filepos & (key_cache_block_size-1));
+   uint offset= (uint) (filepos & (keycache->key_cache_block_size-1));
    uint read_length;
    int page_st;
@@ -1290,17 +1353,17 @@ int key_cache_insert(File file, my_off_t filepos, byte *buff, uint length)
    filepos-= offset;
    do
    {
-     read_length= length > key_cache_block_size ?
-                  key_cache_block_size : length;
+     read_length= length > keycache->key_cache_block_size ?
+                  keycache->key_cache_block_size : length;
      KEYCACHE_DBUG_ASSERT(read_length > 0);
-     keycache_pthread_mutex_lock(&THR_LOCK_keycache);
-     my_cache_r_requests++;
-     block=find_key_block(file, filepos, 0, &page_st);
+     keycache_pthread_mutex_lock(&keycache->thr_lock_keycache);
+     keycache->cache_r_requests++;
+     block= find_key_block(keycache, file, filepos, 0, &page_st);
      if (block->status != BLOCK_ERROR && page_st != PAGE_READ)
      {
        /* The requested page is to be read into the block buffer */
#if !defined(SERIALIZED_READ_FROM_CACHE)
-       keycache_pthread_mutex_unlock(&THR_LOCK_keycache);
+       keycache_pthread_mutex_unlock(&keycache->thr_lock_keycache);
#endif
        /* Copy data from buff */
@@ -1310,7 +1373,7 @@ int key_cache_insert(File file, my_off_t filepos, byte *buff, uint length)
          memcpy(block->buffer+offset, buff, (size_t) read_length);
#if !defined(SERIALIZED_READ_FROM_CACHE)
-       keycache_pthread_mutex_lock(&THR_LOCK_keycache);
+       keycache_pthread_mutex_lock(&keycache->thr_lock_keycache);
#endif
        block->status= BLOCK_READ;
        block->length= read_length+offset;
@@ -1321,15 +1384,15 @@ int key_cache_insert(File file, my_off_t filepos, byte *buff, uint length)
        Link the block into the LRU chain if it's the last submitted request
        for the block
      */
-     unreg_request(block,1);
+     unreg_request(keycache, block,1);
-     keycache_pthread_mutex_unlock(&THR_LOCK_keycache);
+     keycache_pthread_mutex_unlock(&keycache->thr_lock_keycache);
      if (block->status & BLOCK_ERROR)
        DBUG_RETURN(1);
-     buff+=read_length;
-     filepos+=read_length;
+     buff+= read_length;
+     filepos+= read_length;
      offset=0;
    } while ((length-= read_length));
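key_cache_insert() fills cache blocks from a buffer the caller has already read, which is what an index-preload path needs. A hedged sketch of such a loop, illustrative only: `kc`, `kfile`, `io_buff`, `block_size` and `key_file_length` are made-up caller state, not names from this patch.

/* Sketch only: warm a key cache with the contents of an index file. */
static int preload_index(void *kc, File kfile, uint block_size,
                         byte *io_buff, my_off_t key_file_length)
{
  my_off_t pos;
  for (pos= 0 ; pos < key_file_length ; pos+= block_size)
  {
    uint length= (key_file_length - pos < block_size) ?
                 (uint) (key_file_length - pos) : block_size;
    if (my_pread(kfile, io_buff, length, pos, MYF(MY_NABP)))
      return 1;                                  /* read failed         */
    if (key_cache_insert(kc, kfile, pos, io_buff, length))
      return 1;                                  /* could not cache it  */
  }
  return 0;
}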
@@ -1346,92 +1409,96 @@ int key_cache_insert(File file, my_off_t filepos, byte *buff, uint length)
    have been flushed from key cache before the function starts
*/
-int key_cache_write(File file, my_off_t filepos, byte *buff, uint length,
+int key_cache_write(void *pkeycache,
+                    File file, my_off_t filepos, byte *buff, uint length,
                     uint block_length  __attribute__((unused)),
                     int dont_write)
{
  reg1 BLOCK_LINK *block;
  int error=0;
+ KEY_CACHE *keycache= (KEY_CACHE *) pkeycache;
  DBUG_ENTER("key_cache_write");
  DBUG_PRINT("enter", ("file %u, filepos %lu, length %u block_length %u",
-             (uint) file,(ulong) filepos,length,block_length));
+             (uint) file, (ulong) filepos, length, block_length));
  if (!dont_write)
  {
    /* Force writing from buff into disk */
-   statistic_increment(my_cache_write, &THR_LOCK_keycache);
-   if (my_pwrite(file,buff,length,filepos,MYF(MY_NABP | MY_WAIT_IF_FULL)))
+   statistic_increment(keycache->cache_write,
+                       &keycache->thr_lock_keycache);
+   if (my_pwrite(file, buff, length, filepos, MYF(MY_NABP | MY_WAIT_IF_FULL)))
      DBUG_RETURN(1);
  }
#if !defined(DBUG_OFF) && defined(EXTRA_DEBUG)
-  DBUG_EXECUTE("check_keycache",test_key_cache("start of key_cache_write",1););
+  DBUG_EXECUTE("check_keycache",
+               test_key_cache(keycache, "start of key_cache_write", 1););
#endif
- if (my_disk_blocks > 0)
+ if (keycache->disk_blocks > 0)
  {
    /* Key cache is used */
    uint read_length;
-   uint offset= (uint) (filepos & (key_cache_block_size-1));
+   uint offset= (uint) (filepos & (keycache->key_cache_block_size-1));
    int page_st;
    /* Write data in key_cache_block_size increments */
    filepos-= offset;
    do
    {
-     read_length= length > key_cache_block_size ?
-                  key_cache_block_size : length;
+     read_length= length > keycache->key_cache_block_size ?
+                  keycache->key_cache_block_size : length;
      KEYCACHE_DBUG_ASSERT(read_length > 0);
-     keycache_pthread_mutex_lock(&THR_LOCK_keycache);
-     my_cache_w_requests++;
-     block=find_key_block(file, filepos, 1, &page_st);
+     keycache_pthread_mutex_lock(&keycache->thr_lock_keycache);
+     keycache->cache_w_requests++;
+     block= find_key_block(keycache, file, filepos, 1, &page_st);
      if (block->status != BLOCK_ERROR && page_st != PAGE_READ &&
-         (offset || read_length < key_cache_block_size))
-       read_block(block,
-                  offset + read_length >= key_cache_block_size?
-                  offset : key_cache_block_size,
+         (offset || read_length < keycache->key_cache_block_size))
+       read_block(keycache, block,
+                  offset + read_length >= keycache->key_cache_block_size?
+                  offset : keycache->key_cache_block_size,
                   offset,(my_bool)(page_st == PAGE_TO_BE_READ));
      if (!dont_write)
      {
        /* buff has been written to disk at start */
        if ((block->status & BLOCK_CHANGED) &&
-           (!offset && read_length >= key_cache_block_size))
-         link_to_file_list(block, block->hash_link->file, 1);
+           (!offset && read_length >= keycache->key_cache_block_size))
+         link_to_file_list(keycache, block, block->hash_link->file, 1);
      }
      else if (! (block->status & BLOCK_CHANGED))
-       link_to_changed_list(block);
+       link_to_changed_list(keycache, block);
-     set_if_smaller(block->offset,offset)
-     set_if_bigger(block->length,read_length+offset);
+     set_if_smaller(block->offset, offset)
+     set_if_bigger(block->length, read_length+offset);
      if (! (block->status & BLOCK_ERROR))
      {
        if (!(read_length & 511))
-         bmove512(block->buffer+offset,buff,read_length);
+         bmove512(block->buffer+offset, buff, read_length);
        else
-         memcpy(block->buffer+offset,buff,(size_t) read_length);
+         memcpy(block->buffer+offset, buff, (size_t) read_length);
      }
      block->status|=BLOCK_READ;
      /* Unregister the request */
      block->hash_link->requests--;
-     unreg_request(block,1);
+     unreg_request(keycache, block, 1);
      if (block->status & BLOCK_ERROR)
      {
-       keycache_pthread_mutex_unlock(&THR_LOCK_keycache);
-       error=1;
+       keycache_pthread_mutex_unlock(&keycache->thr_lock_keycache);
+       error= 1;
        break;
      }
-     keycache_pthread_mutex_unlock(&THR_LOCK_keycache);
+     keycache_pthread_mutex_unlock(&keycache->thr_lock_keycache);
-     buff+=read_length;
-     filepos+=read_length;
-     offset=0;
+     buff+= read_length;
+     filepos+= read_length;
+     offset= 0;
    } while ((length-= read_length));
  }
@@ -1440,15 +1507,19 @@ int key_cache_write(File file, my_off_t filepos, byte *buff, uint length,
    /* Key cache is not used */
    if (dont_write)
    {
-     statistic_increment(my_cache_w_requests, &THR_LOCK_keycache);
-     statistic_increment(my_cache_write, &THR_LOCK_keycache);
-     if (my_pwrite(file,(byte*) buff,length,filepos,MYF(MY_NABP | MY_WAIT_IF_FULL)))
+     statistic_increment(keycache->cache_w_requests,
+                         &keycache->thr_lock_keycache);
+     statistic_increment(keycache->cache_write,
+                         &keycache->thr_lock_keycache);
+     if (my_pwrite(file, (byte*) buff, length, filepos,
+                   MYF(MY_NABP | MY_WAIT_IF_FULL)))
        error=1;
    }
  }
#if !defined(DBUG_OFF) && defined(EXTRA_DEBUG)
-  DBUG_EXECUTE("exec",test_key_cache("end of key_cache_write",1););
+  DBUG_EXECUTE("exec",
+               test_key_cache(keycache, "end of key_cache_write", 1););
#endif
  DBUG_RETURN(error);
}
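key_cache_write() now also takes the cache handle, and its dont_write argument decides whether the page merely becomes dirty in the cache or is written through to disk at once. A caller-side sketch, illustrative only: `kc`, `kfile`, `page`, `buff`, `block_length` and `delay_write` are hypothetical names, not part of the patch.

/* Sketch only: store an updated index page via a specific key cache. */
static int store_page(void *kc, File kfile, my_off_t page,
                      byte *buff, uint block_length, my_bool delay_write)
{
  return key_cache_write(kc, kfile, page, buff,
                         block_length,          /* bytes to store       */
                         block_length,          /* key block length     */
                         delay_write ? 1 : 0);  /* dont_write flag      */
}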
@@ -1460,27 +1531,27 @@ int key_cache_write(File file, my_off_t filepos, byte *buff, uint length,
  and add it at the beginning of the LRU chain
*/
-static void free_block(BLOCK_LINK *block)
+static void free_block(KEY_CACHE *keycache, BLOCK_LINK *block)
{
  KEYCACHE_THREAD_TRACE("free block");
  KEYCACHE_DBUG_PRINT("free_block",
                      ("block %u to be freed",BLOCK_NUMBER(block)));
  if (block->hash_link)
  {
-   block->status|=BLOCK_REASSIGNED;
-   wait_for_readers(block);
-   unlink_hash(block->hash_link);
+   block->status|= BLOCK_REASSIGNED;
+   wait_for_readers(keycache, block);
+   unlink_hash(keycache, block->hash_link);
  }
  unlink_changed(block);
- block->status=0;
- block->length=0;
- block->offset=key_cache_block_size;
+ block->status= 0;
+ block->length= 0;
+ block->offset= keycache->key_cache_block_size;
  KEYCACHE_THREAD_TRACE("free block");
  KEYCACHE_DBUG_PRINT("free_block",
                      ("block is freed"));
- unreg_request(block,0);
- block->hash_link=NULL;
+ unreg_request(keycache, block, 0);
+ block->hash_link= NULL;
}
@@ -1496,51 +1567,53 @@ static int cmp_sec_link(BLOCK_LINK **a, BLOCK_LINK **b)
  free used blocks if requested
*/
-static int flush_cached_blocks(File file, BLOCK_LINK **cache,
+static int flush_cached_blocks(KEY_CACHE *keycache,
+                               File file, BLOCK_LINK **cache,
                               BLOCK_LINK **end,
                               enum flush_type type)
{
  int error;
- int last_errno=0;
- uint count=end-cache;
+ int last_errno= 0;
+ uint count= end-cache;
  /* Don't lock the cache during the flush */
- keycache_pthread_mutex_unlock(&THR_LOCK_keycache);
+ keycache_pthread_mutex_unlock(&keycache->thr_lock_keycache);
  /*
    As all blocks referred in 'cache' are marked by BLOCK_IN_FLUSH
    we are guarunteed no thread will change them
  */
- qsort((byte*) cache,count,sizeof(*cache),(qsort_cmp) cmp_sec_link);
+ qsort((byte*) cache, count, sizeof(*cache), (qsort_cmp) cmp_sec_link);
- keycache_pthread_mutex_lock(&THR_LOCK_keycache);
+ keycache_pthread_mutex_lock(&keycache->thr_lock_keycache);
  for ( ; cache != end ; cache++)
  {
    BLOCK_LINK *block= *cache;
    KEYCACHE_DBUG_PRINT("flush_cached_blocks",
                        ("block %u to be flushed", BLOCK_NUMBER(block)));
-   keycache_pthread_mutex_unlock(&THR_LOCK_keycache);
-   error=my_pwrite(file,block->buffer+block->offset,block->length,
-                   block->hash_link->diskpos,MYF(MY_NABP | MY_WAIT_IF_FULL));
-   keycache_pthread_mutex_lock(&THR_LOCK_keycache);
-   my_cache_write++;
+   keycache_pthread_mutex_unlock(&keycache->thr_lock_keycache);
+   error= my_pwrite(file, block->buffer+block->offset, block->length,
+                    block->hash_link->diskpos,
+                    MYF(MY_NABP | MY_WAIT_IF_FULL));
+   keycache_pthread_mutex_lock(&keycache->thr_lock_keycache);
+   keycache->cache_write++;
    if (error)
    {
      block->status|= BLOCK_ERROR;
      if (!last_errno)
-       last_errno=errno ? errno : -1;
+       last_errno= errno ? errno : -1;
    }
    /* type will never be FLUSH_IGNORE_CHANGED here */
    if (! (type == FLUSH_KEEP || type == FLUSH_FORCE_WRITE))
    {
-     my_blocks_changed--;
-     free_block(block);
+     keycache->blocks_changed--;
+     free_block(keycache, block);
    }
    else
    {
-     block->status&=~BLOCK_IN_FLUSH;
-     link_to_file_list(block,file,1);
-     unreg_request(block,1);
+     block->status&= ~BLOCK_IN_FLUSH;
+     link_to_file_list(keycache, block, file, 1);
+     unreg_request(keycache, block, 1);
    }
  }
@@ -1552,29 +1625,32 @@ static int flush_cached_blocks(File file, BLOCK_LINK **cache,
  Flush all blocks for a file to disk
*/
-int flush_key_blocks(File file, enum flush_type type)
+int flush_key_blocks(void *pkeycache,
+                     File file, enum flush_type type)
{
- int last_errno=0;
  BLOCK_LINK *cache_buff[FLUSH_CACHE],**cache;
+ int last_errno= 0;
+ KEY_CACHE *keycache= (KEY_CACHE *) pkeycache;
  DBUG_ENTER("flush_key_blocks");
  DBUG_PRINT("enter",("file: %d blocks_used: %d blocks_changed: %d",
-             file, my_blocks_used, my_blocks_changed));
+             file, keycache->blocks_used, keycache->blocks_changed));
#if !defined(DBUG_OFF) && defined(EXTRA_DEBUG)
- DBUG_EXECUTE("check_keycache",test_key_cache("start of flush_key_blocks",0););
+ DBUG_EXECUTE("check_keycache",
+              test_key_cache(keycache, "start of flush_key_blocks", 0););
#endif
- keycache_pthread_mutex_lock(&THR_LOCK_keycache);
+ keycache_pthread_mutex_lock(&keycache->thr_lock_keycache);
- cache=cache_buff;
- if (my_disk_blocks > 0 &&
+ cache= cache_buff;
+ if (keycache->disk_blocks > 0 &&
     (!my_disable_flush_key_blocks || type != FLUSH_KEEP))
  {
    /* Key cache exists and flush is not disabled */
-   int error=0;
-   uint count=0;
+   int error= 0;
+   uint count= 0;
    BLOCK_LINK **pos,**end;
-   BLOCK_LINK *first_in_switch=NULL;
+   BLOCK_LINK *first_in_switch= NULL;
    BLOCK_LINK *block, *next;
#if defined(KEYCACHE_DEBUG)
    uint cnt=0;
@@ -1586,37 +1662,38 @@ int flush_key_blocks(File file, enum flush_type type)
      Count how many key blocks we have to cache to be able
      to flush all dirty pages with minimum seek moves
    */
-   for (block=changed_blocks[FILE_HASH(file)] ;
+   for (block= keycache->changed_blocks[FILE_HASH(file)] ;
         block ;
-        block=block->next_changed)
+        block= block->next_changed)
    {
      if (block->hash_link->file == file)
      {
        count++;
-       KEYCACHE_DBUG_ASSERT(count<= my_blocks_used);
+       KEYCACHE_DBUG_ASSERT(count<= keycache->blocks_used);
      }
    }
    /* Allocate a new buffer only if its bigger than the one we have */
    if (count > FLUSH_CACHE &&
-       !(cache=(BLOCK_LINK**) my_malloc(sizeof(BLOCK_LINK*)*count,MYF(0))))
+       !(cache= (BLOCK_LINK**) my_malloc(sizeof(BLOCK_LINK*)*count,
+                                         MYF(0))))
    {
-     cache=cache_buff;
-     count=FLUSH_CACHE;
+     cache= cache_buff;
+     count= FLUSH_CACHE;
    }
  }
  /* Retrieve the blocks and write them to a buffer to be flushed */
restart:
-   end=(pos=cache)+count;
-   for (block=changed_blocks[FILE_HASH(file)] ;
+   end= (pos= cache)+count;
+   for (block= keycache->changed_blocks[FILE_HASH(file)] ;
         block ;
-        block=next)
+        block= next)
    {
#if defined(KEYCACHE_DEBUG)
      cnt++;
-     KEYCACHE_DBUG_ASSERT(cnt <= my_blocks_used);
+     KEYCACHE_DBUG_ASSERT(cnt <= keycache->blocks_used);
#endif
-     next=block->next_changed;
+     next= block->next_changed;
      if (block->hash_link->file == file)
      {
        /*
@@ -1632,7 +1709,7 @@ restart:
          We care only for the blocks for which flushing was not
          initiated by other threads as a result of page swapping
        */
-       reg_requests(block,1);
+       reg_requests(keycache, block, 1);
        if (type != FLUSH_IGNORE_CHANGED)
        {
          /* It's not a temporary file */
@@ -1642,7 +1719,8 @@ restart:
              This happens only if there is not enough
              memory for the big block
            */
-           if ((error=flush_cached_blocks(file,cache,end,type)))
+           if ((error= flush_cached_blocks(keycache, file, cache,
+                                           end,type)))
              last_errno=error;
            /*
              Restart the scan as some other thread might have changed
            */
            goto restart;
          }
-         *pos++=block;
+         *pos++= block;
        }
        else
        {
          /* It's a temporary file */
-         my_blocks_changed--;
-         free_block(block);
+         keycache->blocks_changed--;
+         free_block(keycache, block);
        }
      }
      else
      {
        /* Link the block into a list of blocks 'in switch' */
        unlink_changed(block);
-       link_changed(block,&first_in_switch);
+       link_changed(block, &first_in_switch);
      }
    }
  }
  if (pos != cache)
  {
-   if ((error=flush_cached_blocks(file,cache,pos,type)))
-     last_errno=error;
+   if ((error= flush_cached_blocks(keycache, file, cache, pos, type)))
+     last_errno= error;
  }
  /* Wait until list of blocks in switch is empty */
  while (first_in_switch)
  {
#if defined(KEYCACHE_DEBUG)
-   cnt=0;
+   cnt= 0;
#endif
-   block=first_in_switch;
+   block= first_in_switch;
    {
-     struct st_my_thread_var *thread=my_thread_var;
+     struct st_my_thread_var *thread= my_thread_var;
      add_to_queue(&block->wqueue[COND_FOR_SAVED], thread);
      do
      {
-       keycache_pthread_cond_wait(&thread->suspend,&THR_LOCK_keycache);
+       keycache_pthread_cond_wait(&thread->suspend,
+                                  &keycache->thr_lock_keycache);
      }
      while (thread->next);
    }
#if defined(KEYCACHE_DEBUG)
    cnt++;
-   KEYCACHE_DBUG_ASSERT(cnt <= my_blocks_used);
+   KEYCACHE_DBUG_ASSERT(cnt <= keycache->blocks_used);
#endif
  }
  /* The following happens very seldom */
@@ -1700,34 +1779,34 @@ restart:
#if defined(KEYCACHE_DEBUG)
    cnt=0;
#endif
-   for (block=file_blocks[FILE_HASH(file)] ;
+   for (block= keycache->file_blocks[FILE_HASH(file)] ;
         block ;
-        block=next)
+        block= next)
    {
#if defined(KEYCACHE_DEBUG)
      cnt++;
-     KEYCACHE_DBUG_ASSERT(cnt <= my_blocks_used);
+     KEYCACHE_DBUG_ASSERT(cnt <= keycache->blocks_used);
#endif
-     next=block->next_changed;
+     next= block->next_changed;
      if (block->hash_link->file == file &&
          (! (block->status & BLOCK_CHANGED) ||
           type == FLUSH_IGNORE_CHANGED))
      {
-       reg_requests(block,1);
-       free_block(block);
+       reg_requests(keycache, block, 1);
+       free_block(keycache, block);
      }
    }
  }
- keycache_pthread_mutex_unlock(&THR_LOCK_keycache);
+ keycache_pthread_mutex_unlock(&keycache->thr_lock_keycache);
#ifndef DBUG_OFF
  DBUG_EXECUTE("check_keycache",
-              test_key_cache("end of flush_key_blocks",0););
+              test_key_cache(keycache, "end of flush_key_blocks", 0););
#endif
  if (cache != cache_buff)
-   my_free((gptr) cache,MYF(0));
+   my_free((gptr) cache, MYF(0));
  if (last_errno)
    errno=last_errno;                /* Return first error */
  DBUG_RETURN(last_errno != 0);
@@ -1738,27 +1817,27 @@ restart:
  Flush all blocks in the key cache to disk
*/
-static int flush_all_key_blocks()
+static int flush_all_key_blocks(KEY_CACHE *keycache)
{
#if defined(KEYCACHE_DEBUG)
  uint cnt=0;
#endif
- while (my_blocks_changed > 0)
+ while (keycache->blocks_changed > 0)
  {
    BLOCK_LINK *block;
-   for (block= my_used_last->next_used ; ; block=block->next_used)
+   for (block= keycache->used_last->next_used ; ; block=block->next_used)
    {
      if (block->hash_link)
      {
#if defined(KEYCACHE_DEBUG)
        cnt++;
-       KEYCACHE_DBUG_ASSERT(cnt <= my_blocks_used);
+       KEYCACHE_DBUG_ASSERT(cnt <= keycache->blocks_used);
#endif
-       if (flush_key_blocks(block->hash_link->file, FLUSH_RELEASE))
+       if (flush_key_blocks(keycache, block->hash_link->file, FLUSH_RELEASE))
          return 1;
        break;
      }
-     if (block == my_used_last)
+     if (block == keycache->used_last)
        break;
    }
  }
@@ -1770,7 +1849,8 @@ static int flush_all_key_blocks()
/*
  Test if disk-cache is ok
*/
-static void test_key_cache(const char *where __attribute__((unused)),
+static void test_key_cache(KEY_CACHE *keycache,
+                           const char *where __attribute__((unused)),
                            my_bool lock __attribute__((unused)))
{
  /* TODO */
@@ -1783,10 +1863,10 @@ static void test_key_cache(const char *where __attribute__((unused)),
#define MAX_QUEUE_LEN 100
-static void keycache_dump()
+static void keycache_dump(KEY_CACHE *keycache)
{
  FILE *keycache_dump_file=fopen(KEYCACHE_DUMP_FILE, "w");
- struct st_my_thread_var *thread_var =my_thread_var;
+ struct st_my_thread_var *thread_var= my_thread_var;
  struct st_my_thread_var *last;
  struct st_my_thread_var *thread;
  BLOCK_LINK *block;
@@ -1829,10 +1909,10 @@ static void keycache_dump()
  }
  while (thread != last);
- for (i=0 ; i< my_blocks_used ; i++)
+ for (i=0 ; i< keycache->blocks_used ; i++)
  {
    int j;
-   block= &my_block_root[i];
+   block= &keycache->block_root[i];
    hash_link= block->hash_link;
    fprintf(keycache_dump_file,
            "block:%u hash_link:%d status:%x #requests=%u waiting_for_readers:%d\n",
@@ -1858,16 +1938,16 @@ static void keycache_dump()
    }
  }
  fprintf(keycache_dump_file, "LRU chain:");
- block= my_used_last;
+ block= keycache->used_last;
  if (block)
  {
    do
    {
-     block=block->next_used;
+     block= block->next_used;
      fprintf(keycache_dump_file,
              "block:%u, ", BLOCK_NUMBER(block));
    }
-   while (block != my_used_last);
+   while (block != keycache->used_last);
  }
  fprintf(keycache_dump_file, "\n");
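Taken together, the flush paths above distinguish the flush_type values: FLUSH_KEEP and FLUSH_FORCE_WRITE write dirty blocks out but keep them linked in the cache, while FLUSH_RELEASE and FLUSH_IGNORE_CHANGED free the blocks (the latter without writing, which the code uses for temporary files). A hedged sketch of a close path built on that, illustrative only: `kc`, `kfile` and `temporary` are hypothetical caller variables.

/* Sketch only: flush and close one index file against a given key cache. */
static int close_index_file(void *kc, File kfile, my_bool temporary)
{
  int error= flush_key_blocks(kc, kfile,
                              temporary ? FLUSH_IGNORE_CHANGED
                                        : FLUSH_RELEASE);
  if (my_close(kfile, MYF(MY_WME)))
    error= 1;
  return error;
}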