author      unknown <igor@rurik.mysql.com>   2003-10-30 10:45:28 -0800
committer   unknown <igor@rurik.mysql.com>   2003-10-30 10:45:28 -0800
commit      52e86548c2ed957e1bef549c38a5b391cd3f47c6 (patch)
tree        bfd8508896ec045400e82650920bcfd37e53a8f1
parent      a4161274ef18a38eff95272cf9f05f46fb08fcb7 (diff)
parent      06ecf87e15d8e8d7ae24aa23ccead4a2c4bbc78e (diff)
download    mariadb-git-52e86548c2ed957e1bef549c38a5b391cd3f47c6.tar.gz
Merge
include/my_base.h: Auto merged
include/my_global.h: Auto merged
include/my_sys.h: Auto merged
isam/test2.c: Auto merged
myisam/mi_check.c: Auto merged
myisam/mi_test2.c: Auto merged
myisam/myisamchk.c: Auto merged
myisam/myisamdef.h: Auto merged
mysql-test/r/func_group.result: Auto merged
sql/ha_myisam.cc: Auto merged
sql/handler.cc: Auto merged
sql/handler.h: Auto merged
sql/item_cmpfunc.h: Auto merged
sql/mysql_priv.h: Auto merged
sql/opt_range.cc: Auto merged
sql/set_var.h: Auto merged
sql/sql_base.cc: Auto merged
sql/sql_lex.cc: Auto merged
sql/sql_lex.h: Auto merged
sql/sql_parse.cc: Auto merged
sql/sql_table.cc: Auto merged
sql/sql_test.cc: Auto merged
sql/sql_yacc.yy: Auto merged
sql/mysqld.cc: SCCS merged
sql/set_var.cc: SCCS merged
-rw-r--r--  include/my_base.h | 4
-rw-r--r--  include/my_global.h | 3
-rw-r--r--  include/my_sys.h | 73
-rw-r--r--  include/myisam.h | 3
-rw-r--r--  isam/_locking.c | 4
-rw-r--r--  isam/_page.c | 24
-rw-r--r--  isam/close.c | 3
-rw-r--r--  isam/isamchk.c | 13
-rw-r--r--  isam/isamlog.c | 8
-rw-r--r--  isam/panic.c | 2
-rw-r--r--  isam/test2.c | 4
-rw-r--r--  isam/test3.c | 2
-rw-r--r--  myisam/Makefile.am | 2
-rw-r--r--  myisam/mi_check.c | 45
-rw-r--r--  myisam/mi_close.c | 2
-rw-r--r--  myisam/mi_delete.c | 40
-rw-r--r--  myisam/mi_delete_all.c | 2
-rw-r--r--  myisam/mi_extra.c | 7
-rw-r--r--  myisam/mi_keycache.c | 107
-rw-r--r--  myisam/mi_locking.c | 14
-rw-r--r--  myisam/mi_page.c | 22
-rw-r--r--  myisam/mi_panic.c | 2
-rw-r--r--  myisam/mi_preload.c | 10
-rw-r--r--  myisam/mi_range.c | 2
-rw-r--r--  myisam/mi_search.c | 10
-rw-r--r--  myisam/mi_test1.c | 2
-rw-r--r--  myisam/mi_test2.c | 12
-rw-r--r--  myisam/mi_test3.c | 2
-rw-r--r--  myisam/mi_write.c | 27
-rw-r--r--  myisam/myisamchk.c | 15
-rw-r--r--  myisam/myisamdef.h | 11
-rw-r--r--  myisam/myisamlog.c | 7
-rw-r--r--  myisam/rt_index.c | 49
-rw-r--r--  myisam/rt_key.c | 3
-rw-r--r--  myisam/rt_split.c | 6
-rw-r--r--  mysql-test/r/func_group.result | 4
-rw-r--r--  mysql-test/r/key_cache.result | 191
-rw-r--r--  mysql-test/r/range.result | 155
-rw-r--r--  mysql-test/t/key_cache.test | 82
-rw-r--r--  mysql-test/t/range.test | 107
-rw-r--r--  mysys/mf_keycache.c | 1498
-rw-r--r--  sql/ha_myisam.cc | 131
-rw-r--r--  sql/ha_myisam.h | 1
-rw-r--r--  sql/ha_myisammrg.cc | 6
-rw-r--r--  sql/handler.cc | 72
-rw-r--r--  sql/handler.h | 7
-rw-r--r--  sql/item_cmpfunc.h | 2
-rw-r--r--  sql/mysql_priv.h | 9
-rw-r--r--  sql/mysqld.cc | 98
-rw-r--r--  sql/opt_range.cc | 15
-rw-r--r--  sql/set_var.cc | 221
-rw-r--r--  sql/set_var.h | 72
-rw-r--r--  sql/sql_base.cc | 131
-rw-r--r--  sql/sql_lex.cc | 33
-rw-r--r--  sql/sql_lex.h | 14
-rw-r--r--  sql/sql_parse.cc | 17
-rw-r--r--  sql/sql_table.cc | 106
-rw-r--r--  sql/sql_test.cc | 5
-rw-r--r--  sql/sql_yacc.yy | 68
-rw-r--r--  sql/table.h | 34
60 files changed, 2800 insertions, 821 deletions
diff --git a/include/my_base.h b/include/my_base.h
index 5af8a968cd0..4c3e53c4573 100644
--- a/include/my_base.h
+++ b/include/my_base.h
@@ -46,6 +46,7 @@
#define HA_OPEN_DELAY_KEY_WRITE 8 /* Don't update index */
#define HA_OPEN_ABORT_IF_CRASHED 16
#define HA_OPEN_FOR_REPAIR 32 /* open even if crashed */
+#define HA_OPEN_TO_ASSIGN 64 /* Open for key cache assignment */
/* The following is parameter to ha_rkey() how to use key */
@@ -131,7 +132,8 @@ enum ha_extra_function {
current query id */
HA_EXTRA_PREPARE_FOR_DELETE,
HA_EXTRA_PREPARE_FOR_UPDATE, /* Remove read cache if problems */
- HA_EXTRA_PRELOAD_BUFFER_SIZE /* Set buffer size for preloading */
+ HA_EXTRA_PRELOAD_BUFFER_SIZE, /* Set buffer size for preloading */
+ HA_EXTRA_SET_KEY_CACHE /* Set ref to assigned key cache */
};
/* The following is parameter to ha_panic() */
diff --git a/include/my_global.h b/include/my_global.h
index 637b8d4b371..73287a2ba2a 100644
--- a/include/my_global.h
+++ b/include/my_global.h
@@ -558,6 +558,9 @@ typedef SOCKET_SIZE_TYPE size_socket;
#define RECORD_CACHE_SIZE (uint) (64*1024-MALLOC_OVERHEAD)
/* Typical key cash */
#define KEY_CACHE_SIZE (uint) (8*1024*1024-MALLOC_OVERHEAD)
+ /* Default size of a key cache block */
+#define KEY_CACHE_BLOCK_SIZE (uint) 1024
+
/* Some things that this system doesn't have */
diff --git a/include/my_sys.h b/include/my_sys.h
index 331ff3d60c9..c404e54c2a9 100644
--- a/include/my_sys.h
+++ b/include/my_sys.h
@@ -111,8 +111,6 @@ extern int NEAR my_errno; /* Last error in mysys */
#define MY_WAIT_FOR_USER_TO_FIX_PANIC 60 /* in seconds */
#define MY_WAIT_GIVE_USER_A_MESSAGE 10 /* Every 10 times of prev */
#define MIN_COMPRESS_LENGTH 50 /* Don't compress small bl. */
-#define DEFAULT_KEYCACHE_BLOCK_SIZE 1024
-#define MAX_KEYCACHE_BLOCK_SIZE 16384
/* root_alloc flags */
#define MY_KEEP_PREALLOC 1
@@ -267,7 +265,8 @@ enum cache_type
enum flush_type
{
- FLUSH_KEEP, FLUSH_RELEASE, FLUSH_IGNORE_CHANGED, FLUSH_FORCE_WRITE
+ FLUSH_KEEP, FLUSH_RELEASE, FLUSH_IGNORE_CHANGED, FLUSH_FORCE_WRITE,
+ FLUSH_REMOVE
};
typedef struct st_record_cache /* Used when cacheing records */
@@ -504,14 +503,46 @@ my_off_t my_b_append_tell(IO_CACHE* info);
#define my_b_bytes_in_cache(info) (uint) (*(info)->current_end - \
*(info)->current_pos)
-/* key_cache_variables */
-typedef struct st_keycache
-{
- ulonglong size;
-} KEY_CACHE;
-
typedef uint32 ha_checksum;
+/* Pointer to a key cache data structure (see the key cache module) */
+typedef struct st_key_cache* KEY_CACHE_HANDLE;
+
+/* Key cache variable structure */
+/*
+ The structure contains the parameters of a key cache that can
+ be set and updated by regular set global statements.
+ It also contains read-only statistics parameters.
+ If the corresponding key cache data structure has been already
+ created the variable contains the key cache handle.
+ The variables are put into a named list called key_caches.
+ At present the variables are only added to this list.
+*/
+typedef struct st_key_cache_var
+{
+ ulonglong buff_size; /* size the memory allocated for the cache */
+ ulong block_size; /* size of the blocks in the key cache */
+ ulong division_limit; /* min. percentage of warm blocks */
+ ulong age_threshold; /* determines when hot block is downgraded */
+ KEY_CACHE_HANDLE cache; /* handles for the current and registered */
+ ulong blocks_used; /* number of currently used blocks */
+ ulong blocks_changed; /* number of currently dirty blocks */
+ ulong cache_w_requests; /* number of write requests (write hits) */
+ ulong cache_write; /* number of writes from the cache to files */
+ ulong cache_r_requests; /* number of read requests (read hits) */
+ ulong cache_read; /* number of reads from files to the cache */
+ int blocks; /* max number of blocks in the cache */
+ struct st_key_cache_asmt *assign_list; /* list of assignments to the cache */
+ int assignments; /* number of not completed assignments */
+ void (*action)(void *); /* optional call back function */
+ void *extra_info; /* ptr to extra info */
+} KEY_CACHE_VAR;
+
+#define DEFAULT_KEY_CACHE_NAME "default"
+extern KEY_CACHE_HANDLE *dflt_keycache;
+extern KEY_CACHE_VAR dflt_key_cache_var;
+#define DFLT_INIT_HITS 3
+
#include <my_alloc.h>
/* Prototypes for mysys and my_func functions */
@@ -650,16 +681,26 @@ extern int flush_write_cache(RECORD_CACHE *info);
extern long my_clock(void);
extern sig_handler sigtstp_handler(int signal_number);
extern void handle_recived_signals(void);
-extern int init_key_cache(ulong use_mem);
-extern int resize_key_cache(ulong use_mem);
-extern byte *key_cache_read(File file,my_off_t filepos,byte* buff,uint length,
+extern int init_key_cache(KEY_CACHE_HANDLE *pkeycache,
+ uint key_cache_block_size,
+ ulong use_mem, KEY_CACHE_VAR* env);
+extern int resize_key_cache(KEY_CACHE_HANDLE *pkeycache,
+ uint key_cache_block_size, ulong use_mem);
+extern void change_key_cache_param(KEY_CACHE_HANDLE keycache);
+extern byte *key_cache_read(KEY_CACHE_HANDLE keycache,
+ File file, my_off_t filepos, int level,
+ byte* buff, uint length,
uint block_length,int return_buffer);
-extern int key_cache_insert(File file, my_off_t filepos,
+extern int key_cache_insert(KEY_CACHE_HANDLE keycache,
+ File file, my_off_t filepos, int level,
byte *buff, uint length);
-extern int key_cache_write(File file,my_off_t filepos,byte* buff,uint length,
+extern int key_cache_write(KEY_CACHE_HANDLE keycache,
+ File file, my_off_t filepos, int level,
+ byte* buff, uint length,
uint block_length,int force_write);
-extern int flush_key_blocks(int file, enum flush_type type);
-extern void end_key_cache(void);
+extern int flush_key_blocks(KEY_CACHE_HANDLE keycache,
+ int file, enum flush_type type);
+extern void end_key_cache(KEY_CACHE_HANDLE *pkeycache,my_bool cleanup);
extern sig_handler my_set_alarm_variable(int signo);
extern void my_string_ptr_sort(void *base,uint items,size_s size);
extern void radixsort_for_str_ptr(uchar* base[], uint number_of_elements,
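
Taken together, the my_sys.h changes above turn the single implicit key cache into a handle-based API: every read, write and flush now names the cache it operates on and passes a level hint (DFLT_INIT_HITS for ordinary index pages throughout this patch). The sketch below shows the intended calling sequence using only the prototypes declared in this hunk; the cache size, file descriptor and function name are invented for illustration and are not part of the patch.

#include "my_global.h"
#include "my_sys.h"

/* Illustrative only: create a private 4MB key cache, push one index
   block through it and tear it down again. */
int use_private_key_cache(File kfile, byte *block, uint block_length)
{
  KEY_CACHE_VAR cache_var;                /* tunables + statistics        */
  KEY_CACHE_HANDLE cache= 0;              /* filled in by init_key_cache  */

  bzero((gptr) &cache_var, sizeof(cache_var));
  /* return value ignored here, as in the VOID(...) call sites below */
  init_key_cache(&cache, KEY_CACHE_BLOCK_SIZE, 4*1024*1024L, &cache_var);

  /* Write one block through the cache; force_write=1 flushes it to disk */
  if (key_cache_write(cache, kfile, (my_off_t) 0, DFLT_INIT_HITS,
                      block, block_length, block_length, 1))
    goto err;

  /* Read the block back; return_buffer=0 copies it into 'block' */
  if (!key_cache_read(cache, kfile, (my_off_t) 0, DFLT_INIT_HITS,
                      block, block_length, block_length, 0))
    goto err;

  if (flush_key_blocks(cache, kfile, FLUSH_RELEASE))
    goto err;
  end_key_cache(&cache, 1);               /* cleanup=1: free the cache    */
  return 0;

err:
  end_key_cache(&cache, 1);
  return 1;
}
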
diff --git a/include/myisam.h b/include/myisam.h
index 0ffcdae8567..bf28168b7d7 100644
--- a/include/myisam.h
+++ b/include/myisam.h
@@ -408,6 +408,9 @@ my_bool mi_test_if_sort_rep(MI_INFO *info, ha_rows rows, ulonglong key_map,
int mi_init_bulk_insert(MI_INFO *info, ulong cache_size, ha_rows rows);
void mi_flush_bulk_insert(MI_INFO *info, uint inx);
void mi_end_bulk_insert(MI_INFO *info);
+int mi_assign_to_keycache(MI_INFO *info, ulonglong key_map,
+ KEY_CACHE_VAR *key_cache,
+ pthread_mutex_t *assign_lock);
int mi_preload(MI_INFO *info, ulonglong key_map, my_bool ignore_leaves);
#ifdef __cplusplus
diff --git a/isam/_locking.c b/isam/_locking.c
index be9741a4237..0ffb46a81f8 100644
--- a/isam/_locking.c
+++ b/isam/_locking.c
@@ -50,7 +50,7 @@ int nisam_lock_database(N_INFO *info, int lock_type)
else
count= --share->w_locks;
if (info->lock_type == F_WRLCK && !share->w_locks &&
- flush_key_blocks(share->kfile,FLUSH_KEEP))
+ flush_key_blocks(*dflt_keycache,share->kfile,FLUSH_KEEP))
error=my_errno;
if (info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED))
if (end_io_cache(&info->rec_cache))
@@ -329,7 +329,7 @@ int _nisam_test_if_changed(register N_INFO *info)
share->state.uniq != info->last_uniq)
{ /* Keyfile has changed */
if (share->state.process != share->this_process)
- VOID(flush_key_blocks(share->kfile,FLUSH_RELEASE));
+ VOID(flush_key_blocks(*dflt_keycache,share->kfile,FLUSH_RELEASE));
share->last_process=share->state.process;
info->last_loop= share->state.loop;
info->last_uniq= share->state.uniq;
diff --git a/isam/_page.c b/isam/_page.c
index 65733d66b77..60f1dfb0ce6 100644
--- a/isam/_page.c
+++ b/isam/_page.c
@@ -27,10 +27,11 @@ uchar *_nisam_fetch_keypage(register N_INFO *info, N_KEYDEF *keyinfo,
my_off_t page, uchar *buff, int return_buffer)
{
uchar *tmp;
- tmp=(uchar*) key_cache_read(info->s->kfile,page,(byte*) buff,
- (uint) keyinfo->base.block_length,
- (uint) keyinfo->base.block_length,
- return_buffer);
+ tmp=(uchar*) key_cache_read(*dflt_keycache,
+ info->s->kfile,page,DFLT_INIT_HITS,(byte*) buff,
+ (uint) keyinfo->base.block_length,
+ (uint) keyinfo->base.block_length,
+ return_buffer);
if (tmp == info->buff)
{
info->update|=HA_STATE_BUFF_SAVED;
@@ -83,9 +84,11 @@ int _nisam_write_keypage(register N_INFO *info, register N_KEYDEF *keyinfo,
length=keyinfo->base.block_length;
}
#endif
- return (key_cache_write(info->s->kfile,page,(byte*) buff,length,
- (uint) keyinfo->base.block_length,
- (int) (info->lock_type != F_UNLCK)));
+ return (key_cache_write(*dflt_keycache,
+ info->s->kfile,page,DFLT_INIT_HITS,
+ (byte*) buff,length,
+ (uint) keyinfo->base.block_length,
+ (int) (info->lock_type != F_UNLCK)));
} /* nisam_write_keypage */
@@ -99,7 +102,9 @@ int _nisam_dispose(register N_INFO *info, N_KEYDEF *keyinfo, my_off_t pos)
old_link=info->s->state.key_del[keynr];
info->s->state.key_del[keynr]=(ulong) pos;
- DBUG_RETURN(key_cache_write(info->s->kfile,pos,(byte*) &old_link,
+ DBUG_RETURN(key_cache_write(*dflt_keycache,
+ info->s->kfile,pos,DFLT_INIT_HITS,
+ (byte*) &old_link,
sizeof(long),
(uint) keyinfo->base.block_length,
(int) (info->lock_type != F_UNLCK)));
@@ -126,7 +131,8 @@ ulong _nisam_new(register N_INFO *info, N_KEYDEF *keyinfo)
}
else
{
- if (!key_cache_read(info->s->kfile,pos,
+ if (!key_cache_read(*dflt_keycache,
+ info->s->kfile,pos,DFLT_INIT_HITS,
(byte*) &info->s->state.key_del[keynr],
(uint) sizeof(long),
(uint) keyinfo->base.block_length,0))
diff --git a/isam/close.c b/isam/close.c
index f1465990100..37b35e450ae 100644
--- a/isam/close.c
+++ b/isam/close.c
@@ -56,7 +56,8 @@ int nisam_close(register N_INFO *info)
if (flag)
{
- if (share->kfile >= 0 && flush_key_blocks(share->kfile,FLUSH_RELEASE))
+ if (share->kfile >= 0 &&
+ flush_key_blocks(*dflt_keycache,share->kfile,FLUSH_RELEASE))
error=my_errno;
if (share->kfile >= 0 && my_close(share->kfile,MYF(0)))
error = my_errno;
diff --git a/isam/isamchk.c b/isam/isamchk.c
index 939a4be732f..e59da07e85b 100644
--- a/isam/isamchk.c
+++ b/isam/isamchk.c
@@ -516,7 +516,8 @@ static int nisamchk(my_string filename)
if (!rep_quick)
{
if (testflag & T_EXTEND)
- VOID(init_key_cache(use_buffers));
+ VOID(init_key_cache(dflt_keycache,KEY_CACHE_BLOCK_SIZE,
+ use_buffers,&dflt_key_cache_var));
VOID(init_io_cache(&read_cache,datafile,(uint) read_buffer_length,
READ_CACHE,share->pack.header_length,1,
MYF(MY_WME)));
@@ -1459,7 +1460,8 @@ my_string name;
printf("Data records: %lu\n",(ulong) share->state.records);
}
- VOID(init_key_cache(use_buffers));
+ VOID(init_key_cache(dflt_keycache,KEY_CACHE_BLOCK_SIZE,use_buffers,
+ &dflt_key_cache_var));
if (init_io_cache(&read_cache,info->dfile,(uint) read_buffer_length,
READ_CACHE,share->pack.header_length,1,MYF(MY_WME)))
goto err;
@@ -1887,12 +1889,12 @@ static void lock_memory(void)
static int flush_blocks(file)
File file;
{
- if (flush_key_blocks(file,FLUSH_RELEASE))
+ if (flush_key_blocks(dflt_keycache,file,FLUSH_RELEASE))
{
print_error("%d when trying to write bufferts",my_errno);
return(1);
}
- end_key_cache();
+ end_key_cache(dflt_keycache,1);
return 0;
} /* flush_blocks */
@@ -1936,7 +1938,8 @@ int write_info;
if (share->state.key_root[sort_key] == NI_POS_ERROR)
DBUG_RETURN(0); /* Nothing to do */
- init_key_cache(use_buffers);
+ init_key_cache(dflt_keycache,KEY_CACHE_BLOCK_SIZE,use_buffers,
+ &dflt_key_cache_var);
if (init_io_cache(&info->rec_cache,-1,(uint) write_buffer_length,
WRITE_CACHE,share->pack.header_length,1,
MYF(MY_WME | MY_WAIT_IF_FULL)))
diff --git a/isam/isamlog.c b/isam/isamlog.c
index ff3bca39e40..d8ea9d4ed80 100644
--- a/isam/isamlog.c
+++ b/isam/isamlog.c
@@ -329,8 +329,8 @@ static int examine_log(my_string file_name, char **table_names)
bzero((gptr) com_count,sizeof(com_count));
init_tree(&tree,0,0,sizeof(file_info),(qsort_cmp2) file_info_compare,1,
(tree_element_free) file_info_free, NULL);
- VOID(init_key_cache(KEY_CACHE_SIZE));
-
+ VOID(init_key_cache(dflt_keycache,KEY_CACHE_BLOCK_SIZE,KEY_CACHE_SIZE,
+ &dflt_key_cache_var));
files_open=0; access_time=0;
while (access_time++ != number_of_commands &&
!my_b_read(&cache,(byte*) head,9))
@@ -622,7 +622,7 @@ static int examine_log(my_string file_name, char **table_names)
goto end;
}
}
- end_key_cache();
+ end_key_cache(dflt_keycache,1);
delete_tree(&tree);
VOID(end_io_cache(&cache));
VOID(my_close(file,MYF(0)));
@@ -642,7 +642,7 @@ static int examine_log(my_string file_name, char **table_names)
llstr(isamlog_filepos,llbuff)));
fflush(stderr);
end:
- end_key_cache();
+ end_key_cache(dflt_keycache,1);
delete_tree(&tree);
VOID(end_io_cache(&cache));
VOID(my_close(file,MYF(0)));
diff --git a/isam/panic.c b/isam/panic.c
index e51e83671df..de765f50e62 100644
--- a/isam/panic.c
+++ b/isam/panic.c
@@ -48,7 +48,7 @@ int nisam_panic(enum ha_panic_function flag)
if (info->s->base.options & HA_OPTION_READ_ONLY_DATA)
break;
#endif
- if (flush_key_blocks(info->s->kfile,FLUSH_RELEASE))
+ if (flush_key_blocks(*dflt_keycache,info->s->kfile,FLUSH_RELEASE))
error=my_errno;
if (info->opt_flag & WRITE_CACHE_USED)
if (flush_io_cache(&info->rec_cache))
diff --git a/isam/test2.c b/isam/test2.c
index 5b09cc8b716..67b411d1bbf 100644
--- a/isam/test2.c
+++ b/isam/test2.c
@@ -156,7 +156,7 @@ int main(int argc, char *argv[])
goto err;
printf("- Writing key:s\n");
if (key_cacheing)
- init_key_cache(IO_SIZE*16); /* Use a small cache */
+ init_key_cache(dflt_keycache,512,IO_SIZE*16,0); /* Use a small cache */
if (locking)
nisam_lock_database(file,F_WRLCK);
if (write_cacheing)
@@ -674,7 +674,7 @@ end:
puts("Locking used");
if (use_blob)
puts("blobs used");
- end_key_cache();
+ end_key_cache(&dflt_keycache,1);
if (blob_buffer)
my_free(blob_buffer,MYF(0));
my_end(MY_CHECK_ERROR | MY_GIVE_INFO);
diff --git a/isam/test3.c b/isam/test3.c
index 228030f5832..1b867ba0348 100644
--- a/isam/test3.c
+++ b/isam/test3.c
@@ -173,7 +173,7 @@ void start_test(int id)
exit(1);
}
if (key_cacheing && rnd(2) == 0)
- init_key_cache(65536L);
+ init_key_cache(dflt_keycache,512,65536L,0);
printf("Process %d, pid: %d\n",id,(int) getpid()); fflush(stdout);
for (error=i=0 ; i < tests && !error; i++)
diff --git a/myisam/Makefile.am b/myisam/Makefile.am
index d4cd953ac66..fdcfc6d0d41 100644
--- a/myisam/Makefile.am
+++ b/myisam/Makefile.am
@@ -47,7 +47,7 @@ libmyisam_a_SOURCES = mi_open.c mi_extra.c mi_info.c mi_rkey.c \
mi_range.c mi_dbug.c mi_checksum.c mi_log.c \
mi_changed.c mi_static.c mi_delete_all.c \
mi_delete_table.c mi_rename.c mi_check.c \
- mi_preload.c \
+ mi_keycache.c mi_preload.c \
ft_parser.c ft_stopwords.c ft_static.c \
ft_update.c ft_boolean_search.c ft_nlq_search.c sort.c \
rt_index.c rt_key.c rt_mbr.c rt_split.c sp_key.c
diff --git a/myisam/mi_check.c b/myisam/mi_check.c
index ceac8f6356c..fe7ff1af407 100644
--- a/myisam/mi_check.c
+++ b/myisam/mi_check.c
@@ -242,7 +242,9 @@ static int check_k_link(MI_CHECK *param, register MI_INFO *info, uint nr)
if (next_link > info->state->key_file_length ||
next_link & (info->s->blocksize-1))
DBUG_RETURN(1);
- if (!(buff=key_cache_read(info->s->kfile, next_link, (byte*) info->buff,
+ if (!(buff=key_cache_read(*info->s->keycache,
+ info->s->kfile, next_link, DFLT_INIT_HITS,
+ (byte*) info->buff,
myisam_block_size, block_size, 1)))
DBUG_RETURN(1);
next_link=mi_sizekorr(buff);
@@ -271,7 +273,8 @@ int chk_size(MI_CHECK *param, register MI_INFO *info)
if (!(param->testflag & T_SILENT)) puts("- check file-size");
- flush_key_blocks(info->s->kfile, FLUSH_FORCE_WRITE); /* If called externally */
+ flush_key_blocks(*info->s->keycache,
+ info->s->kfile, FLUSH_FORCE_WRITE); /* If called externally */
size=my_seek(info->s->kfile,0L,MY_SEEK_END,MYF(0));
if ((skr=(my_off_t) info->state->key_file_length) != size)
@@ -381,8 +384,8 @@ int chk_key(MI_CHECK *param, register MI_INFO *info)
if (share->state.key_root[key] == HA_OFFSET_ERROR &&
(info->state->records == 0 || keyinfo->flag & HA_FULLTEXT))
continue;
- if (!_mi_fetch_keypage(info,keyinfo,share->state.key_root[key],info->buff,
- 0))
+ if (!_mi_fetch_keypage(info,keyinfo,share->state.key_root[key],
+ DFLT_INIT_HITS,info->buff,0))
{
mi_check_print_error(param,"Can't read indexpage from filepos: %s",
llstr(share->state.key_root[key],buff));
@@ -560,7 +563,8 @@ static int chk_index(MI_CHECK *param, MI_INFO *info, MI_KEYDEF *keyinfo,
info->state->key_file_length=(max_length &
~ (my_off_t) (info->s->blocksize-1));
}
- if (!_mi_fetch_keypage(info,keyinfo,next_page,temp_buff,0))
+ if (!_mi_fetch_keypage(info,keyinfo,next_page,
+ DFLT_INIT_HITS,temp_buff,0))
{
mi_check_print_error(param,"Can't read key from filepos: %s",llstr(next_page,llbuff));
goto err;
@@ -1139,7 +1143,8 @@ int mi_repair(MI_CHECK *param, register MI_INFO *info,
param->testflag|=T_REP; /* for easy checking */
if (!param->using_global_keycache)
- VOID(init_key_cache(param->use_buffers));
+ VOID(init_key_cache(dflt_keycache,dflt_key_cache_var.block_size,
+ param->use_buffers,&dflt_key_cache_var));
if (init_io_cache(&param->read_cache,info->dfile,
(uint) param->read_buffer_length,
@@ -1460,7 +1465,8 @@ int movepoint(register MI_INFO *info, byte *record, my_off_t oldpos,
nod_flag=mi_test_if_nod(info->buff);
_mi_dpointer(info,info->int_keypos-nod_flag-
info->s->rec_reflength,newpos);
- if (_mi_write_keypage(info,keyinfo,info->last_keypage,info->buff))
+ if (_mi_write_keypage(info,keyinfo,info->last_keypage,
+ DFLT_INIT_HITS,info->buff))
DBUG_RETURN(-1);
}
else
@@ -1497,13 +1503,13 @@ void lock_memory(MI_CHECK *param __attribute__((unused)))
int flush_blocks(MI_CHECK *param, File file)
{
- if (flush_key_blocks(file,FLUSH_RELEASE))
+ if (flush_key_blocks(*dflt_keycache,file,FLUSH_RELEASE))
{
mi_check_print_error(param,"%d when trying to write bufferts",my_errno);
return(1);
}
if (!param->using_global_keycache)
- end_key_cache();
+ end_key_cache(dflt_keycache,1);
return 0;
} /* flush_blocks */
@@ -1558,7 +1564,7 @@ int mi_sort_index(MI_CHECK *param, register MI_INFO *info, my_string name)
}
/* Flush key cache for this file if we are calling this outside myisamchk */
- flush_key_blocks(share->kfile, FLUSH_IGNORE_CHANGED);
+ flush_key_blocks(*share->keycache,share->kfile, FLUSH_IGNORE_CHANGED);
share->state.version=(ulong) time((time_t*) 0);
old_state= share->state; /* save state if not stored */
@@ -1622,7 +1628,7 @@ static int sort_one_index(MI_CHECK *param, MI_INFO *info, MI_KEYDEF *keyinfo,
mi_check_print_error(param,"Not Enough memory");
DBUG_RETURN(-1);
}
- if (!_mi_fetch_keypage(info,keyinfo,pagepos,buff,0))
+ if (!_mi_fetch_keypage(info,keyinfo,pagepos,DFLT_INIT_HITS,buff,0))
{
mi_check_print_error(param,"Can't read key block from filepos: %s",
llstr(pagepos,llbuff));
@@ -1868,7 +1874,7 @@ int mi_repair_by_sort(MI_CHECK *param, register MI_INFO *info,
Flush key cache for this file if we are calling this outside
myisamchk
*/
- flush_key_blocks(share->kfile, FLUSH_IGNORE_CHANGED);
+ flush_key_blocks(*share->keycache,share->kfile, FLUSH_IGNORE_CHANGED);
/* Clear the pointers to the given rows */
for (i=0 ; i < share->base.keys ; i++)
share->state.key_root[i]= HA_OFFSET_ERROR;
@@ -1878,7 +1884,7 @@ int mi_repair_by_sort(MI_CHECK *param, register MI_INFO *info,
}
else
{
- if (flush_key_blocks(share->kfile, FLUSH_FORCE_WRITE))
+ if (flush_key_blocks(*share->keycache,share->kfile, FLUSH_FORCE_WRITE))
goto err;
key_map= ~key_map; /* Create the missing keys */
}
@@ -2231,7 +2237,7 @@ int mi_repair_parallel(MI_CHECK *param, register MI_INFO *info,
Flush key cache for this file if we are calling this outside
myisamchk
*/
- flush_key_blocks(share->kfile, FLUSH_IGNORE_CHANGED);
+ flush_key_blocks(*share->keycache,share->kfile, FLUSH_IGNORE_CHANGED);
/* Clear the pointers to the given rows */
for (i=0 ; i < share->base.keys ; i++)
share->state.key_root[i]= HA_OFFSET_ERROR;
@@ -2241,7 +2247,7 @@ int mi_repair_parallel(MI_CHECK *param, register MI_INFO *info,
}
else
{
- if (flush_key_blocks(share->kfile, FLUSH_FORCE_WRITE))
+ if (flush_key_blocks(*share->keycache,share->kfile, FLUSH_FORCE_WRITE))
goto err;
key_map= ~key_map; /* Create the missing keys */
}
@@ -3356,13 +3362,13 @@ static int sort_insert_key(MI_SORT_PARAM *sort_param,
bzero((byte*) anc_buff+key_block->last_length,
keyinfo->block_length- key_block->last_length);
key_file_length=info->state->key_file_length;
- if ((filepos=_mi_new(info,keyinfo)) == HA_OFFSET_ERROR)
+ if ((filepos=_mi_new(info,keyinfo,DFLT_INIT_HITS)) == HA_OFFSET_ERROR)
DBUG_RETURN(1);
/* If we read the page from the key cache, we have to write it back to it */
if (key_file_length == info->state->key_file_length)
{
- if (_mi_write_keypage(info, keyinfo, filepos, anc_buff))
+ if (_mi_write_keypage(info, keyinfo, filepos, DFLT_INIT_HITS, anc_buff))
DBUG_RETURN(1);
}
else if (my_pwrite(info->s->kfile,(byte*) anc_buff,
@@ -3460,13 +3466,14 @@ int flush_pending_blocks(MI_SORT_PARAM *sort_param)
_mi_kpointer(info,key_block->end_pos,filepos);
key_file_length=info->state->key_file_length;
bzero((byte*) key_block->buff+length, keyinfo->block_length-length);
- if ((filepos=_mi_new(info,keyinfo)) == HA_OFFSET_ERROR)
+ if ((filepos=_mi_new(info,keyinfo,DFLT_INIT_HITS)) == HA_OFFSET_ERROR)
DBUG_RETURN(1);
/* If we read the page from the key cache, we have to write it back */
if (key_file_length == info->state->key_file_length)
{
- if (_mi_write_keypage(info, keyinfo, filepos, key_block->buff))
+ if (_mi_write_keypage(info, keyinfo, filepos,
+ DFLT_INIT_HITS, key_block->buff))
DBUG_RETURN(1);
}
else if (my_pwrite(info->s->kfile,(byte*) key_block->buff,
diff --git a/myisam/mi_close.c b/myisam/mi_close.c
index dbaaebb1143..8e7fb497dfc 100644
--- a/myisam/mi_close.c
+++ b/myisam/mi_close.c
@@ -64,7 +64,7 @@ int mi_close(register MI_INFO *info)
if (flag)
{
if (share->kfile >= 0 &&
- flush_key_blocks(share->kfile,
+ flush_key_blocks(*share->keycache,share->kfile,
share->temporary ? FLUSH_IGNORE_CHANGED :
FLUSH_RELEASE))
error=my_errno;
diff --git a/myisam/mi_delete.c b/myisam/mi_delete.c
index 2ab5c5d0319..5635e0a2139 100644
--- a/myisam/mi_delete.c
+++ b/myisam/mi_delete.c
@@ -155,7 +155,7 @@ static int _mi_ck_real_delete(register MI_INFO *info, MI_KEYDEF *keyinfo,
DBUG_RETURN(my_errno=ENOMEM);
}
DBUG_PRINT("info",("root_page: %ld",old_root));
- if (!_mi_fetch_keypage(info,keyinfo,old_root,root_buff,0))
+ if (!_mi_fetch_keypage(info,keyinfo,old_root,DFLT_INIT_HITS,root_buff,0))
{
error= -1;
goto err;
@@ -179,11 +179,12 @@ static int _mi_ck_real_delete(register MI_INFO *info, MI_KEYDEF *keyinfo,
*root=_mi_kpos(nod_flag,root_buff+2+nod_flag);
else
*root=HA_OFFSET_ERROR;
- if (_mi_dispose(info,keyinfo,old_root))
+ if (_mi_dispose(info,keyinfo,old_root,DFLT_INIT_HITS))
error= -1;
}
else
- error=_mi_write_keypage(info,keyinfo,old_root,root_buff);
+ error=_mi_write_keypage(info,keyinfo,old_root,
+ DFLT_INIT_HITS,root_buff);
}
}
err:
@@ -253,7 +254,7 @@ static int d_search(register MI_INFO *info, register MI_KEYDEF *keyinfo,
{
/* the last entry in sub-tree */
DBUG_PRINT("info",("FT2: the last entry"));
- _mi_dispose(info, keyinfo, root);
+ _mi_dispose(info, keyinfo, root,DFLT_INIT_HITS);
/* fall through to normal delete */
}
else
@@ -268,7 +269,8 @@ static int d_search(register MI_INFO *info, register MI_KEYDEF *keyinfo,
subkeys++;
ft_intXstore(kpos, subkeys);
if (!ret_value)
- ret_value=_mi_write_keypage(info,keyinfo,page,anc_buff);
+ ret_value=_mi_write_keypage(info,keyinfo,page,
+ DFLT_INIT_HITS,anc_buff);
DBUG_PRINT("exit",("Return: %d",ret_value));
DBUG_RETURN(ret_value);
}
@@ -287,7 +289,7 @@ static int d_search(register MI_INFO *info, register MI_KEYDEF *keyinfo,
DBUG_PRINT("exit",("Return: %d",-1));
DBUG_RETURN(-1);
}
- if (!_mi_fetch_keypage(info,keyinfo,leaf_page,leaf_buff,0))
+ if (!_mi_fetch_keypage(info,keyinfo,leaf_page,DFLT_INIT_HITS,leaf_buff,0))
goto err;
}
@@ -319,7 +321,7 @@ static int d_search(register MI_INFO *info, register MI_KEYDEF *keyinfo,
mi_putint(anc_buff,length,nod_flag);
if (!nod_flag)
{ /* On leaf page */
- if (_mi_write_keypage(info,keyinfo,page,anc_buff))
+ if (_mi_write_keypage(info,keyinfo,page,DFLT_INIT_HITS,anc_buff))
{
DBUG_PRINT("exit",("Return: %d",-1));
DBUG_RETURN(-1);
@@ -354,7 +356,7 @@ static int d_search(register MI_INFO *info, register MI_KEYDEF *keyinfo,
ret_value=_mi_split_page(info,keyinfo,key,anc_buff,lastkey,0) | 2;
}
if (save_flag && ret_value != 1)
- ret_value|=_mi_write_keypage(info,keyinfo,page,anc_buff);
+ ret_value|=_mi_write_keypage(info,keyinfo,page,DFLT_INIT_HITS,anc_buff);
else
{
DBUG_DUMP("page",(byte*) anc_buff,mi_getint(anc_buff));
@@ -398,7 +400,7 @@ static int del(register MI_INFO *info, register MI_KEYDEF *keyinfo, uchar *key,
if (!(next_buff= (uchar*) my_alloca((uint) keyinfo->block_length+
MI_MAX_KEY_BUFF*2)))
DBUG_RETURN(-1);
- if (!_mi_fetch_keypage(info,keyinfo,next_page,next_buff,0))
+ if (!_mi_fetch_keypage(info,keyinfo,next_page,DFLT_INIT_HITS,next_buff,0))
ret_value= -1;
else
{
@@ -426,7 +428,7 @@ static int del(register MI_INFO *info, register MI_KEYDEF *keyinfo, uchar *key,
(uchar*) 0,(uchar*) 0,(my_off_t) 0,0);
}
}
- if (_mi_write_keypage(info,keyinfo,leaf_page,leaf_buff))
+ if (_mi_write_keypage(info,keyinfo,leaf_page,DFLT_INIT_HITS,leaf_buff))
goto err;
}
my_afree((byte*) next_buff);
@@ -436,7 +438,7 @@ static int del(register MI_INFO *info, register MI_KEYDEF *keyinfo, uchar *key,
/* Remove last key from leaf page */
mi_putint(leaf_buff,key_start-leaf_buff,nod_flag);
- if (_mi_write_keypage(info,keyinfo,leaf_page,leaf_buff))
+ if (_mi_write_keypage(info,keyinfo,leaf_page,DFLT_INIT_HITS,leaf_buff))
goto err;
/* Place last key in ancestor page on deleted key position */
@@ -524,7 +526,7 @@ static int underflow(register MI_INFO *info, register MI_KEYDEF *keyinfo,
goto err;
}
next_page= _mi_kpos(key_reflength,next_keypos);
- if (!_mi_fetch_keypage(info,keyinfo,next_page,buff,0))
+ if (!_mi_fetch_keypage(info,keyinfo,next_page,DFLT_INIT_HITS,buff,0))
goto err;
buff_length=mi_getint(buff);
DBUG_DUMP("next",(byte*) buff,buff_length);
@@ -563,7 +565,7 @@ static int underflow(register MI_INFO *info, register MI_KEYDEF *keyinfo,
if (buff_length <= keyinfo->block_length)
{ /* Keys in one page */
memcpy((byte*) leaf_buff,(byte*) buff,(size_t) buff_length);
- if (_mi_dispose(info,keyinfo,next_page))
+ if (_mi_dispose(info,keyinfo,next_page,DFLT_INIT_HITS))
goto err;
}
else
@@ -612,10 +614,10 @@ static int underflow(register MI_INFO *info, register MI_KEYDEF *keyinfo,
(*keyinfo->store_key)(keyinfo,buff+p_length,&s_temp);
mi_putint(buff,length+t_length+p_length,nod_flag);
- if (_mi_write_keypage(info,keyinfo,next_page,buff))
+ if (_mi_write_keypage(info,keyinfo,next_page,DFLT_INIT_HITS,buff))
goto err;
}
- if (_mi_write_keypage(info,keyinfo,leaf_page,leaf_buff))
+ if (_mi_write_keypage(info,keyinfo,leaf_page,DFLT_INIT_HITS,leaf_buff))
goto err;
DBUG_RETURN(anc_length <= ((info->quick_mode ? MI_MIN_BLOCK_LENGTH :
(uint) keyinfo->underflow_block_length)));
@@ -627,7 +629,7 @@ static int underflow(register MI_INFO *info, register MI_KEYDEF *keyinfo,
if (!keypos)
goto err;
next_page= _mi_kpos(key_reflength,keypos);
- if (!_mi_fetch_keypage(info,keyinfo,next_page,buff,0))
+ if (!_mi_fetch_keypage(info,keyinfo,next_page,DFLT_INIT_HITS,buff,0))
goto err;
buff_length=mi_getint(buff);
endpos=buff+buff_length;
@@ -671,7 +673,7 @@ static int underflow(register MI_INFO *info, register MI_KEYDEF *keyinfo,
if (buff_length <= keyinfo->block_length)
{ /* Keys in one page */
- if (_mi_dispose(info,keyinfo,leaf_page))
+ if (_mi_dispose(info,keyinfo,leaf_page,DFLT_INIT_HITS))
goto err;
}
else
@@ -718,11 +720,11 @@ static int underflow(register MI_INFO *info, register MI_KEYDEF *keyinfo,
(size_t) length);
(*keyinfo->store_key)(keyinfo,leaf_buff+p_length,&s_temp);
mi_putint(leaf_buff,length+t_length+p_length,nod_flag);
- if (_mi_write_keypage(info,keyinfo,leaf_page,leaf_buff))
+ if (_mi_write_keypage(info,keyinfo,leaf_page,DFLT_INIT_HITS,leaf_buff))
goto err;
mi_putint(buff,endpos-buff,nod_flag);
}
- if (_mi_write_keypage(info,keyinfo,next_page,buff))
+ if (_mi_write_keypage(info,keyinfo,next_page,DFLT_INIT_HITS,buff))
goto err;
DBUG_RETURN(anc_length <= (uint) keyinfo->block_length/2);
err:
diff --git a/myisam/mi_delete_all.c b/myisam/mi_delete_all.c
index 45e56626d59..99873661feb 100644
--- a/myisam/mi_delete_all.c
+++ b/myisam/mi_delete_all.c
@@ -53,7 +53,7 @@ int mi_delete_all_rows(MI_INFO *info)
If we are using delayed keys or if the user has done changes to the tables
since it was locked then there may be key blocks in the key cache
*/
- flush_key_blocks(share->kfile, FLUSH_IGNORE_CHANGED);
+ flush_key_blocks(*share->keycache, share->kfile, FLUSH_IGNORE_CHANGED);
if (my_chsize(info->dfile, 0, 0, MYF(MY_WME)) ||
my_chsize(share->kfile, share->base.keystart, 0, MYF(MY_WME)) )
goto err;
diff --git a/myisam/mi_extra.c b/myisam/mi_extra.c
index c86ca1c2e7b..1d57e0c0e18 100644
--- a/myisam/mi_extra.c
+++ b/myisam/mi_extra.c
@@ -283,7 +283,7 @@ int mi_extra(MI_INFO *info, enum ha_extra_function function, void *extra_arg)
#ifdef __WIN__
/* Close the isam and data files as Win32 can't drop an open table */
pthread_mutex_lock(&share->intern_lock);
- if (flush_key_blocks(share->kfile,
+ if (flush_key_blocks(*share->keycache, share->kfile,
(function == HA_EXTRA_FORCE_REOPEN ?
FLUSH_RELEASE : FLUSH_IGNORE_CHANGED)))
{
@@ -329,7 +329,7 @@ int mi_extra(MI_INFO *info, enum ha_extra_function function, void *extra_arg)
break;
case HA_EXTRA_FLUSH:
if (!share->temporary)
- flush_key_blocks(share->kfile,FLUSH_KEEP);
+ flush_key_blocks(*share->keycache,share->kfile,FLUSH_KEEP);
#ifdef HAVE_PWRITE
_mi_decrement_open_count(info);
#endif
@@ -374,6 +374,9 @@ int mi_extra(MI_INFO *info, enum ha_extra_function function, void *extra_arg)
case HA_EXTRA_PRELOAD_BUFFER_SIZE:
info->preload_buff_size= *((ulong *) extra_arg);
break;
+ case HA_EXTRA_SET_KEY_CACHE:
+ share->reg_keycache= share->keycache= (KEY_CACHE_HANDLE *) extra_arg;
+ break;
case HA_EXTRA_KEY_CACHE:
case HA_EXTRA_NO_KEY_CACHE:
default:
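
The new HA_EXTRA_SET_KEY_CACHE case above stores the passed pointer as both the current and the registered key cache of the table share. A hypothetical caller hands the address of the handle inside a KEY_CACHE_VAR to mi_extra(); in the server this is done from sql/ha_myisam.cc, which is outside this excerpt, so the wrapper below is illustrative only.

#include "my_sys.h"
#include "myisam.h"

/* Illustrative wrapper: point an open MyISAM table at the key cache
   described by kc_var.  extra_arg must be a KEY_CACHE_HANDLE*, i.e.
   the address of the 'cache' member, matching the cast in mi_extra(). */
static int set_table_key_cache(MI_INFO *info, KEY_CACHE_VAR *kc_var)
{
  return mi_extra(info, HA_EXTRA_SET_KEY_CACHE, (void*) &kc_var->cache);
}
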
diff --git a/myisam/mi_keycache.c b/myisam/mi_keycache.c
new file mode 100644
index 00000000000..4b7d7d6b23f
--- /dev/null
+++ b/myisam/mi_keycache.c
@@ -0,0 +1,107 @@
+/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+/*
+ Key cache assignments
+*/
+
+#include "myisamdef.h"
+
+
+/*
+ Assign pages of the index file for a table to a key cache
+
+ SYNOPSIS
+ mi_assign_to_keycache()
+ info open table
+ map map of indexes to assign to the key cache
+ key_cache_ptr pointer to the key cache handle
+
+ RETURN VALUE
+ 0 if a success. error code - otherwise.
+
+ NOTES.
+ At present pages for all indexes must be assigned to the same key cache.
+ In future only pages for indexes specified in the key_map parameter
+ of the table will be assigned to the specified key cache.
+*/
+
+typedef struct st_assign_extra_info
+{
+ pthread_mutex_t *lock;
+ struct st_my_thread_var *waiting_thread;
+} ASSIGN_EXTRA_INFO;
+
+static void remove_key_cache_assign(void *arg)
+{
+ KEY_CACHE_VAR *key_cache= (KEY_CACHE_VAR *) arg;
+ ASSIGN_EXTRA_INFO *extra_info= (ASSIGN_EXTRA_INFO *) key_cache->extra_info;
+ struct st_my_thread_var *waiting_thread;
+ pthread_mutex_t *lock= extra_info->lock;
+ pthread_mutex_lock(lock);
+ if (!(--key_cache->assignments) &&
+ (waiting_thread = extra_info->waiting_thread))
+ {
+ my_free(extra_info, MYF(0));
+ key_cache->extra_info= 0;
+ if (waiting_thread != my_thread_var)
+ pthread_cond_signal(&waiting_thread->suspend);
+ }
+ pthread_mutex_unlock(lock);
+}
+
+int mi_assign_to_keycache(MI_INFO *info, ulonglong key_map,
+ KEY_CACHE_VAR *key_cache,
+ pthread_mutex_t *assign_lock)
+{
+ ASSIGN_EXTRA_INFO *extra_info;
+ int error= 0;
+ MYISAM_SHARE* share= info->s;
+
+ DBUG_ENTER("mi_assign_to_keycache");
+
+ share->reg_keycache= &key_cache->cache;
+ pthread_mutex_lock(assign_lock);
+ if (!(extra_info= (ASSIGN_EXTRA_INFO *) key_cache->extra_info))
+ {
+ if (!(extra_info= (ASSIGN_EXTRA_INFO*) my_malloc(sizeof(ASSIGN_EXTRA_INFO),
+ MYF(MY_WME | MY_ZEROFILL))))
+ DBUG_RETURN(HA_ERR_OUT_OF_MEM);
+ key_cache->extra_info= extra_info;
+ key_cache->action= remove_key_cache_assign;
+ extra_info->lock= assign_lock;
+ }
+ key_cache->assignments++;
+ pthread_mutex_unlock(assign_lock);
+
+ if (!(info->lock_type == F_WRLCK && share->w_locks))
+ {
+ if (flush_key_blocks(*share->keycache, share->kfile, FLUSH_REMOVE))
+ {
+ error=my_errno;
+ mi_mark_crashed(info); /* Mark that table must be checked */
+ }
+ share->keycache= &key_cache->cache;
+ }
+ else
+ {
+ extra_info->waiting_thread= my_thread_var;
+ }
+
+
+ DBUG_RETURN(error);
+}
+
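
Per the SYNOPSIS above, mi_assign_to_keycache() re-registers the index pages of an open table with another key cache; if the table is write-locked, only the registered cache is switched and the flush is deferred to the next unlock (see the mi_locking.c change that follows). The caller owns the assign mutex and is expected to have created the target cache with init_key_cache() beforehand. The driver below is a sketch with invented names; the real caller is the CACHE INDEX handling in the sql layer, which is not part of this excerpt.

#include <my_pthread.h>
#include "myisam.h"

static pthread_mutex_t assign_lock;      /* assume pthread_mutex_init() ran */

/* Sketch: move every index of 'info' to the cache held in 'new_cache'.
   The key_map argument is passed as "all keys"; per the NOTES above the
   map is not yet interpreted and all indexes go to the same cache. */
static int assign_all_indexes(MI_INFO *info, KEY_CACHE_VAR *new_cache)
{
  return mi_assign_to_keycache(info, ~(ulonglong) 0, new_cache, &assign_lock);
}
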
diff --git a/myisam/mi_locking.c b/myisam/mi_locking.c
index a707eb294a9..785979235ce 100644
--- a/myisam/mi_locking.c
+++ b/myisam/mi_locking.c
@@ -34,6 +34,7 @@ int mi_lock_database(MI_INFO *info, int lock_type)
uint count;
MYISAM_SHARE *share=info->s;
uint flag;
+ uint switch_fl= 0;
DBUG_ENTER("mi_lock_database");
DBUG_PRINT("info",("lock_type: %d", lock_type));
@@ -60,12 +61,21 @@ int mi_lock_database(MI_INFO *info, int lock_type)
else
count= --share->w_locks;
--share->tot_locks;
+ /*
+ During a key cache reassignment the current and registered
+ key caches for the table are different.
+ */
if (info->lock_type == F_WRLCK && !share->w_locks &&
- !share->delay_key_write && flush_key_blocks(share->kfile,FLUSH_KEEP))
+ ((switch_fl= share->keycache != share->reg_keycache) ||
+ !share->delay_key_write) &&
+ flush_key_blocks(*share->keycache, share->kfile,
+ switch_fl ? FLUSH_REMOVE : FLUSH_KEEP))
{
error=my_errno;
mi_mark_crashed(info); /* Mark that table must be checked */
}
+ if (switch_fl)
+ share->keycache= share->reg_keycache;
if (info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED))
{
if (end_io_cache(&info->rec_cache))
@@ -400,7 +410,7 @@ int _mi_test_if_changed(register MI_INFO *info)
{ /* Keyfile has changed */
DBUG_PRINT("info",("index file changed"));
if (share->state.process != share->this_process)
- VOID(flush_key_blocks(share->kfile,FLUSH_RELEASE));
+ VOID(flush_key_blocks(*share->keycache,share->kfile,FLUSH_RELEASE));
share->last_process=share->state.process;
info->last_unique= share->state.unique;
info->last_loop= share->state.update_count;
diff --git a/myisam/mi_page.c b/myisam/mi_page.c
index 49f3243e08d..72aaac5d381 100644
--- a/myisam/mi_page.c
+++ b/myisam/mi_page.c
@@ -24,14 +24,16 @@
/* Fetch a key-page in memory */
uchar *_mi_fetch_keypage(register MI_INFO *info, MI_KEYDEF *keyinfo,
- my_off_t page, uchar *buff, int return_buffer)
+ my_off_t page, int level,
+ uchar *buff, int return_buffer)
{
uchar *tmp;
uint page_size;
DBUG_ENTER("_mi_fetch_keypage");
DBUG_PRINT("enter",("page: %ld",page));
- tmp=(uchar*) key_cache_read(info->s->kfile,page,(byte*) buff,
+ tmp=(uchar*) key_cache_read(*info->s->keycache,
+ info->s->kfile, page, level, (byte*) buff,
(uint) keyinfo->block_length,
(uint) keyinfo->block_length,
return_buffer);
@@ -61,7 +63,7 @@ uchar *_mi_fetch_keypage(register MI_INFO *info, MI_KEYDEF *keyinfo,
/* Write a key-page on disk */
int _mi_write_keypage(register MI_INFO *info, register MI_KEYDEF *keyinfo,
- my_off_t page, uchar *buff)
+ my_off_t page, int level, uchar *buff)
{
reg3 uint length;
DBUG_ENTER("_mi_write_keypage");
@@ -92,7 +94,8 @@ int _mi_write_keypage(register MI_INFO *info, register MI_KEYDEF *keyinfo,
length=keyinfo->block_length;
}
#endif
- DBUG_RETURN((key_cache_write(info->s->kfile,page,(byte*) buff,length,
+ DBUG_RETURN((key_cache_write(*info->s->keycache,
+ info->s->kfile,page, level, (byte*) buff,length,
(uint) keyinfo->block_length,
(int) ((info->lock_type != F_UNLCK) ||
info->s->delay_key_write))));
@@ -101,7 +104,8 @@ int _mi_write_keypage(register MI_INFO *info, register MI_KEYDEF *keyinfo,
/* Remove page from disk */
-int _mi_dispose(register MI_INFO *info, MI_KEYDEF *keyinfo, my_off_t pos)
+int _mi_dispose(register MI_INFO *info, MI_KEYDEF *keyinfo, my_off_t pos,
+ int level)
{
my_off_t old_link;
char buff[8];
@@ -112,7 +116,8 @@ int _mi_dispose(register MI_INFO *info, MI_KEYDEF *keyinfo, my_off_t pos)
info->s->state.key_del[keyinfo->block_size]=pos;
mi_sizestore(buff,old_link);
info->s->state.changed|= STATE_NOT_SORTED_PAGES;
- DBUG_RETURN(key_cache_write(info->s->kfile,pos,buff,
+ DBUG_RETURN(key_cache_write(*info->s->keycache,
+ info->s->kfile, pos , level, buff,
sizeof(buff),
(uint) keyinfo->block_length,
(int) (info->lock_type != F_UNLCK)));
@@ -121,7 +126,7 @@ int _mi_dispose(register MI_INFO *info, MI_KEYDEF *keyinfo, my_off_t pos)
/* Make new page on disk */
-my_off_t _mi_new(register MI_INFO *info, MI_KEYDEF *keyinfo)
+my_off_t _mi_new(register MI_INFO *info, MI_KEYDEF *keyinfo, int level)
{
my_off_t pos;
char buff[8];
@@ -140,7 +145,8 @@ my_off_t _mi_new(register MI_INFO *info, MI_KEYDEF *keyinfo)
}
else
{
- if (!key_cache_read(info->s->kfile,pos,
+ if (!key_cache_read(*info->s->keycache,
+ info->s->kfile, pos, level,
buff,
(uint) sizeof(buff),
(uint) keyinfo->block_length,0))
diff --git a/myisam/mi_panic.c b/myisam/mi_panic.c
index bd0b07b097e..f1d1d839fcd 100644
--- a/myisam/mi_panic.c
+++ b/myisam/mi_panic.c
@@ -48,7 +48,7 @@ int mi_panic(enum ha_panic_function flag)
if (info->s->options & HA_OPTION_READ_ONLY_DATA)
break;
#endif
- if (flush_key_blocks(info->s->kfile,FLUSH_RELEASE))
+ if (flush_key_blocks(*info->s->keycache,info->s->kfile,FLUSH_RELEASE))
error=my_errno;
if (info->opt_flag & WRITE_CACHE_USED)
if (flush_io_cache(&info->rec_cache))
diff --git a/myisam/mi_preload.c b/myisam/mi_preload.c
index a5d9bec160e..dc4fff5700a 100644
--- a/myisam/mi_preload.c
+++ b/myisam/mi_preload.c
@@ -69,7 +69,7 @@ int mi_preload(MI_INFO *info, ulonglong key_map, my_bool ignore_leaves)
if (!(buff= (uchar *) my_malloc(length, MYF(MY_WME))))
DBUG_RETURN(my_errno= HA_ERR_OUT_OF_MEM);
- if (flush_key_blocks(share->kfile, FLUSH_RELEASE))
+ if (flush_key_blocks(*share->keycache,share->kfile, FLUSH_RELEASE))
goto err;
do
@@ -87,7 +87,9 @@ int mi_preload(MI_INFO *info, ulonglong key_map, my_bool ignore_leaves)
{
if (mi_test_if_nod(buff))
{
- if (key_cache_insert(share->kfile, pos, (byte*) buff, block_length))
+ if (key_cache_insert(*share->keycache,
+ share->kfile, pos, DFLT_INIT_HITS,
+ (byte*) buff, block_length))
goto err;
}
pos+= block_length;
@@ -97,7 +99,9 @@ int mi_preload(MI_INFO *info, ulonglong key_map, my_bool ignore_leaves)
}
else
{
- if (key_cache_insert(share->kfile, pos, (byte*) buff, length))
+ if (key_cache_insert(*share->keycache,
+ share->kfile, pos, DFLT_INIT_HITS,
+ (byte*) buff, length))
goto err;
pos+= length;
}
diff --git a/myisam/mi_range.c b/myisam/mi_range.c
index 379ffba135a..caa57ce6187 100644
--- a/myisam/mi_range.c
+++ b/myisam/mi_range.c
@@ -142,7 +142,7 @@ static double _mi_search_pos(register MI_INFO *info,
if (pos == HA_OFFSET_ERROR)
DBUG_RETURN(0.5);
- if (!(buff=_mi_fetch_keypage(info,keyinfo,pos,info->buff,1)))
+ if (!(buff=_mi_fetch_keypage(info,keyinfo,pos,DFLT_INIT_HITS,info->buff,1)))
goto err;
flag=(*keyinfo->bin_search)(info,keyinfo,buff,key,key_len,nextflag,
&keypos,info->lastkey, &after_key);
diff --git a/myisam/mi_search.c b/myisam/mi_search.c
index 7f4c90d1bc1..2871633102d 100644
--- a/myisam/mi_search.c
+++ b/myisam/mi_search.c
@@ -76,7 +76,7 @@ int _mi_search(register MI_INFO *info, register MI_KEYDEF *keyinfo,
DBUG_RETURN(1); /* Search at upper levels */
}
- if (!(buff=_mi_fetch_keypage(info,keyinfo,pos,info->buff,
+ if (!(buff=_mi_fetch_keypage(info,keyinfo,pos,DFLT_INIT_HITS,info->buff,
test(!(nextflag & SEARCH_SAVE_BUFF)))))
goto err;
DBUG_DUMP("page",(byte*) buff,mi_getint(buff));
@@ -119,7 +119,7 @@ int _mi_search(register MI_INFO *info, register MI_KEYDEF *keyinfo,
if (pos != info->last_keypage)
{
uchar *old_buff=buff;
- if (!(buff=_mi_fetch_keypage(info,keyinfo,pos,info->buff,
+ if (!(buff=_mi_fetch_keypage(info,keyinfo,pos,DFLT_INIT_HITS,info->buff,
test(!(nextflag & SEARCH_SAVE_BUFF)))))
goto err;
keypos=buff+(keypos-old_buff);
@@ -1108,7 +1108,7 @@ int _mi_search_next(register MI_INFO *info, register MI_KEYDEF *keyinfo,
if (info->buff_used)
{
if (!_mi_fetch_keypage(info,keyinfo,info->last_search_keypage,
- info->buff,0))
+ DFLT_INIT_HITS,info->buff,0))
DBUG_RETURN(-1);
info->buff_used=0;
}
@@ -1177,7 +1177,7 @@ int _mi_search_first(register MI_INFO *info, register MI_KEYDEF *keyinfo,
do
{
- if (!_mi_fetch_keypage(info,keyinfo,pos,info->buff,0))
+ if (!_mi_fetch_keypage(info,keyinfo,pos,DFLT_INIT_HITS,info->buff,0))
{
info->lastpos= HA_OFFSET_ERROR;
DBUG_RETURN(-1);
@@ -1220,7 +1220,7 @@ int _mi_search_last(register MI_INFO *info, register MI_KEYDEF *keyinfo,
buff=info->buff;
do
{
- if (!_mi_fetch_keypage(info,keyinfo,pos,buff,0))
+ if (!_mi_fetch_keypage(info,keyinfo,pos,DFLT_INIT_HITS,buff,0))
{
info->lastpos= HA_OFFSET_ERROR;
DBUG_RETURN(-1);
diff --git a/myisam/mi_test1.c b/myisam/mi_test1.c
index 5a6818b8a3e..ec68e23c0c2 100644
--- a/myisam/mi_test1.c
+++ b/myisam/mi_test1.c
@@ -50,7 +50,7 @@ int main(int argc,char *argv[])
MY_INIT(argv[0]);
my_init();
if (key_cacheing)
- init_key_cache(IO_SIZE*16);
+ init_key_cache(dflt_keycache,512,IO_SIZE*16,0);
get_options(argc,argv);
exit(run_test("test1"));
diff --git a/myisam/mi_test2.c b/myisam/mi_test2.c
index 5c066075e49..5a40db3171d 100644
--- a/myisam/mi_test2.c
+++ b/myisam/mi_test2.c
@@ -49,6 +49,7 @@ static int verbose=0,testflag=0,
static int pack_seg=HA_SPACE_PACK,pack_type=HA_PACK_KEY,remove_count=-1,
create_flag=0;
static ulong key_cache_size=IO_SIZE*16;
+static uint key_cache_block_size=IO_SIZE;
static uint keys=MYISAM_KEYS,recant=1000;
static uint use_blob=0;
@@ -214,7 +215,7 @@ int main(int argc, char *argv[])
if (!silent)
printf("- Writing key:s\n");
if (key_cacheing)
- init_key_cache(key_cache_size); /* Use a small cache */
+ init_key_cache(dflt_keycache,key_cache_block_size,key_cache_size,0); /* Use a small cache */
if (locking)
mi_lock_database(file,F_WRLCK);
if (write_cacheing)
@@ -274,7 +275,7 @@ int main(int argc, char *argv[])
goto end;
}
if (key_cacheing)
- resize_key_cache(key_cache_size*2);
+ resize_key_cache(dflt_keycache,key_cache_block_size,key_cache_size*2);
}
if (!silent)
@@ -816,16 +817,19 @@ end:
puts("Locking used");
if (use_blob)
puts("blobs used");
+#if 0
printf("key cache status: \n\
blocks used:%10lu\n\
w_requests: %10lu\n\
writes: %10lu\n\
r_requests: %10lu\n\
reads: %10lu\n",
- my_blocks_used, my_cache_w_requests, my_cache_write,
+ my_blocks_used,
+ my_cache_w_requests, my_cache_write,
my_cache_r_requests, my_cache_read);
+#endif
}
- end_key_cache();
+ end_key_cache(dflt_keycache,1);
if (blob_buffer)
my_free(blob_buffer,MYF(0));
my_end(silent ? MY_CHECK_ERROR : MY_CHECK_ERROR | MY_GIVE_INFO);
diff --git a/myisam/mi_test3.c b/myisam/mi_test3.c
index 63cea4f715d..866c6299fac 100644
--- a/myisam/mi_test3.c
+++ b/myisam/mi_test3.c
@@ -177,7 +177,7 @@ void start_test(int id)
exit(1);
}
if (key_cacheing && rnd(2) == 0)
- init_key_cache(65536L);
+ init_key_cache(dflt_keycache,512,65536L,0);
printf("Process %d, pid: %d\n",id,getpid()); fflush(stdout);
for (error=i=0 ; i < tests && !error; i++)
diff --git a/myisam/mi_write.c b/myisam/mi_write.c
index 8e0b7e3530c..1fd5cddec62 100644
--- a/myisam/mi_write.c
+++ b/myisam/mi_write.c
@@ -290,8 +290,8 @@ int _mi_enlarge_root(MI_INFO *info, MI_KEYDEF *keyinfo, uchar *key,
mi_putint(info->buff,t_length+2+nod_flag,nod_flag);
(*keyinfo->store_key)(keyinfo,info->buff+2+nod_flag,&s_temp);
info->buff_used=info->page_changed=1; /* info->buff is used */
- if ((*root= _mi_new(info,keyinfo)) == HA_OFFSET_ERROR ||
- _mi_write_keypage(info,keyinfo,*root,info->buff))
+ if ((*root= _mi_new(info,keyinfo,DFLT_INIT_HITS)) == HA_OFFSET_ERROR ||
+ _mi_write_keypage(info,keyinfo,*root,DFLT_INIT_HITS,info->buff))
DBUG_RETURN(-1);
DBUG_RETURN(0);
} /* _mi_enlarge_root */
@@ -322,7 +322,7 @@ static int w_search(register MI_INFO *info, register MI_KEYDEF *keyinfo,
if (!(temp_buff= (uchar*) my_alloca((uint) keyinfo->block_length+
MI_MAX_KEY_BUFF*2)))
DBUG_RETURN(-1);
- if (!_mi_fetch_keypage(info,keyinfo,page,temp_buff,0))
+ if (!_mi_fetch_keypage(info,keyinfo,page,DFLT_INIT_HITS,temp_buff,0))
goto err;
flag=(*keyinfo->bin_search)(info,keyinfo,temp_buff,key,search_key_length,
@@ -368,7 +368,7 @@ static int w_search(register MI_INFO *info, register MI_KEYDEF *keyinfo,
subkeys--; /* should there be underflow protection ? */
ft_intXstore(keypos, subkeys);
if (!error)
- error=_mi_write_keypage(info,keyinfo,page,temp_buff);
+ error=_mi_write_keypage(info,keyinfo,page,DFLT_INIT_HITS,temp_buff);
my_afree((byte*) temp_buff);
DBUG_RETURN(error);
}
@@ -391,7 +391,7 @@ static int w_search(register MI_INFO *info, register MI_KEYDEF *keyinfo,
{
error=_mi_insert(info,keyinfo,key,temp_buff,keypos,keybuff,father_buff,
father_keypos,father_page, insert_last);
- if (_mi_write_keypage(info,keyinfo,page,temp_buff))
+ if (_mi_write_keypage(info,keyinfo,page,DFLT_INIT_HITS,temp_buff))
goto err;
}
my_afree((byte*) temp_buff);
@@ -515,7 +515,7 @@ int _mi_split_page(register MI_INFO *info, register MI_KEYDEF *keyinfo,
}
/* Move middle item to key and pointer to new page */
- if ((new_pos=_mi_new(info,keyinfo)) == HA_OFFSET_ERROR)
+ if ((new_pos=_mi_new(info,keyinfo,DFLT_INIT_HITS)) == HA_OFFSET_ERROR)
DBUG_RETURN(-1);
_mi_kpointer(info,_mi_move_key(keyinfo,key,key_buff),new_pos);
@@ -531,7 +531,7 @@ int _mi_split_page(register MI_INFO *info, register MI_KEYDEF *keyinfo,
(*keyinfo->store_key)(keyinfo,info->buff+key_ref_length,&s_temp);
mi_putint(info->buff,length+t_length+key_ref_length,nod_flag);
- if (_mi_write_keypage(info,keyinfo,new_pos,info->buff))
+ if (_mi_write_keypage(info,keyinfo,new_pos,DFLT_INIT_HITS,info->buff))
DBUG_RETURN(-1);
DBUG_DUMP("key",(byte*) key,_mi_keylength(keyinfo,key));
DBUG_RETURN(2); /* Middle key up */
@@ -682,7 +682,7 @@ static int _mi_balance_page(register MI_INFO *info, MI_KEYDEF *keyinfo,
DBUG_PRINT("test",("use left page: %lu",next_page));
} /* father_key_pos ptr to parting key */
- if (!_mi_fetch_keypage(info,keyinfo,next_page,info->buff,0))
+ if (!_mi_fetch_keypage(info,keyinfo,next_page,DFLT_INIT_HITS,info->buff,0))
goto err;
DBUG_DUMP("next",(byte*) info->buff,mi_getint(info->buff));
@@ -722,8 +722,8 @@ static int _mi_balance_page(register MI_INFO *info, MI_KEYDEF *keyinfo,
memcpy((byte*) buff+2,(byte*) pos+k_length,(size_t) length);
}
- if (_mi_write_keypage(info,keyinfo,next_page,info->buff) ||
- _mi_write_keypage(info,keyinfo,father_page,father_buff))
+ if (_mi_write_keypage(info,keyinfo,next_page,DFLT_INIT_HITS,info->buff) ||
+ _mi_write_keypage(info,keyinfo,father_page,DFLT_INIT_HITS,father_buff))
goto err;
DBUG_RETURN(0);
}
@@ -763,12 +763,13 @@ static int _mi_balance_page(register MI_INFO *info, MI_KEYDEF *keyinfo,
memcpy((byte*) (right ? key : father_key_pos),pos,(size_t) k_length);
memcpy((byte*) (right ? father_key_pos : key),tmp_part_key, k_length);
- if ((new_pos=_mi_new(info,keyinfo)) == HA_OFFSET_ERROR)
+ if ((new_pos=_mi_new(info,keyinfo,DFLT_INIT_HITS)) == HA_OFFSET_ERROR)
goto err;
_mi_kpointer(info,key+k_length,new_pos);
if (_mi_write_keypage(info,keyinfo,(right ? new_pos : next_page),
- info->buff) ||
- _mi_write_keypage(info,keyinfo,(right ? next_page : new_pos),extra_buff))
+ DFLT_INIT_HITS,info->buff) ||
+ _mi_write_keypage(info,keyinfo,(right ? next_page : new_pos),
+ DFLT_INIT_HITS,extra_buff))
goto err;
DBUG_RETURN(1); /* Middle key up */
diff --git a/myisam/myisamchk.c b/myisam/myisamchk.c
index 76d3205dc01..b03f76ea8e8 100644
--- a/myisam/myisamchk.c
+++ b/myisam/myisamchk.c
@@ -44,6 +44,7 @@ static const char *load_default_groups[]= { "myisamchk", 0 };
static const char *set_charset_name, *opt_tmpdir;
static CHARSET_INFO *set_charset;
static long opt_myisam_block_size;
+static long opt_key_cache_block_size;
static const char *my_progname_short;
static int stopwords_inited= 0;
static MY_TMPDIR myisamchk_tmpdir;
@@ -148,7 +149,8 @@ int main(int argc, char **argv)
enum options_mc {
OPT_CHARSETS_DIR=256, OPT_SET_CHARSET,OPT_START_CHECK_POS,
- OPT_CORRECT_CHECKSUM, OPT_KEY_BUFFER_SIZE, OPT_MYISAM_BLOCK_SIZE,
+ OPT_CORRECT_CHECKSUM, OPT_KEY_BUFFER_SIZE,
+ OPT_KEY_CACHE_BLOCK_SIZE, OPT_MYISAM_BLOCK_SIZE,
OPT_READ_BUFFER_SIZE, OPT_WRITE_BUFFER_SIZE, OPT_SORT_BUFFER_SIZE,
OPT_SORT_KEY_BLOCKS, OPT_DECODE_BITS, OPT_FT_MIN_WORD_LEN,
OPT_FT_MAX_WORD_LEN, OPT_FT_MAX_WORD_LEN_FOR_SORT
@@ -283,6 +285,11 @@ static struct my_option my_long_options[] =
(gptr*) &check_param.use_buffers, (gptr*) &check_param.use_buffers, 0,
GET_ULONG, REQUIRED_ARG, (long) USE_BUFFER_INIT, (long) MALLOC_OVERHEAD,
(long) ~0L, (long) MALLOC_OVERHEAD, (long) IO_SIZE, 0},
+ { "key_cache_block_size", OPT_KEY_CACHE_BLOCK_SIZE, "",
+ (gptr*) &opt_key_cache_block_size,
+ (gptr*) &opt_key_cache_block_size, 0,
+ GET_LONG, REQUIRED_ARG, MI_KEY_BLOCK_LENGTH, MI_MIN_KEY_BLOCK_LENGTH,
+ MI_MAX_KEY_BLOCK_LENGTH, 0, MI_MIN_KEY_BLOCK_LENGTH, 0},
{ "myisam_block_size", OPT_MYISAM_BLOCK_SIZE, "",
(gptr*) &opt_myisam_block_size, (gptr*) &opt_myisam_block_size, 0,
GET_LONG, REQUIRED_ARG, MI_KEY_BLOCK_LENGTH, MI_MIN_KEY_BLOCK_LENGTH,
@@ -1025,7 +1032,8 @@ static int myisamchk(MI_CHECK *param, my_string filename)
!(param->testflag & (T_FAST | T_FORCE_CREATE)))
{
if (param->testflag & (T_EXTEND | T_MEDIUM))
- VOID(init_key_cache(param->use_buffers));
+ VOID(init_key_cache(dflt_keycache,opt_key_cache_block_size,
+ param->use_buffers,&dflt_key_cache_var));
VOID(init_io_cache(&param->read_cache,datafile,
(uint) param->read_buffer_length,
READ_CACHE,
@@ -1448,7 +1456,8 @@ static int mi_sort_records(MI_CHECK *param,
if (share->state.key_root[sort_key] == HA_OFFSET_ERROR)
DBUG_RETURN(0); /* Nothing to do */
- init_key_cache(param->use_buffers);
+ init_key_cache(dflt_keycache,opt_key_cache_block_size,param->use_buffers,
+ &dflt_key_cache_var);
if (init_io_cache(&info->rec_cache,-1,(uint) param->write_buffer_length,
WRITE_CACHE,share->pack.header_length,1,
MYF(MY_WME | MY_WAIT_IF_FULL)))
diff --git a/myisam/myisamdef.h b/myisam/myisamdef.h
index 9844bb7b36d..fe9cd151c57 100644
--- a/myisam/myisamdef.h
+++ b/myisam/myisamdef.h
@@ -166,6 +166,8 @@ typedef struct st_mi_isam_share { /* Shared between opens */
char *data_file_name, /* Resolved path names from symlinks */
*index_file_name;
byte *file_map; /* mem-map of file if possible */
+ KEY_CACHE_HANDLE *keycache; /* ref to the current key cache */
+ KEY_CACHE_HANDLE *reg_keycache; /* ref to the registered key cache */
MI_DECODE_TREE *decode_trees;
uint16 *decode_tables;
int (*read_record)(struct st_myisam_info*, my_off_t, byte*);
@@ -546,11 +548,12 @@ extern int _mi_search_next(MI_INFO *info,MI_KEYDEF *keyinfo,uchar *key,
extern int _mi_search_first(MI_INFO *info,MI_KEYDEF *keyinfo,my_off_t pos);
extern int _mi_search_last(MI_INFO *info,MI_KEYDEF *keyinfo,my_off_t pos);
extern uchar *_mi_fetch_keypage(MI_INFO *info,MI_KEYDEF *keyinfo,my_off_t page,
- uchar *buff,int return_buffer);
+ int level,uchar *buff,int return_buffer);
extern int _mi_write_keypage(MI_INFO *info,MI_KEYDEF *keyinfo,my_off_t page,
- uchar *buff);
-extern int _mi_dispose(MI_INFO *info,MI_KEYDEF *keyinfo,my_off_t pos);
-extern my_off_t _mi_new(MI_INFO *info,MI_KEYDEF *keyinfo);
+ int level, uchar *buff);
+extern int _mi_dispose(MI_INFO *info,MI_KEYDEF *keyinfo,my_off_t pos,
+ int level);
+extern my_off_t _mi_new(MI_INFO *info,MI_KEYDEF *keyinfo,int level);
extern uint _mi_make_key(MI_INFO *info,uint keynr,uchar *key,
const byte *record,my_off_t filepos);
extern uint _mi_pack_key(MI_INFO *info,uint keynr,uchar *key,uchar *old,
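
With the two new share members above, the internal page primitives are routed through *share->keycache, and every call now carries a level hint (DFLT_INIT_HITS for ordinary B-tree pages, as seen throughout this patch). A minimal sketch of the new calling convention follows; the function name and locals are invented for illustration.

#include "myisamdef.h"

/* Illustrative only: fetch the root page of index 'keynr' into info->buff
   and write it straight back, using the level-aware page primitives. */
static int touch_root_page(MI_INFO *info, uint keynr)
{
  MI_KEYDEF *keyinfo= info->s->keyinfo + keynr;
  my_off_t root= info->s->state.key_root[keynr];

  if (root == HA_OFFSET_ERROR)
    return 0;                                    /* empty index */
  if (!_mi_fetch_keypage(info, keyinfo, root, DFLT_INIT_HITS, info->buff, 0))
    return my_errno;
  return _mi_write_keypage(info, keyinfo, root, DFLT_INIT_HITS, info->buff);
}
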
diff --git a/myisam/myisamlog.c b/myisam/myisamlog.c
index ca164fcdaca..9b987364521 100644
--- a/myisam/myisamlog.c
+++ b/myisam/myisamlog.c
@@ -333,7 +333,8 @@ static int examine_log(my_string file_name, char **table_names)
bzero((gptr) com_count,sizeof(com_count));
init_tree(&tree,0,0,sizeof(file_info),(qsort_cmp2) file_info_compare,1,
(tree_element_free) file_info_free, NULL);
- VOID(init_key_cache(KEY_CACHE_SIZE));
+ VOID(init_key_cache(dflt_keycache,KEY_CACHE_BLOCK_SIZE,KEY_CACHE_SIZE,
+ &dflt_key_cache_var));
files_open=0; access_time=0;
while (access_time++ != number_of_commands &&
@@ -647,7 +648,7 @@ static int examine_log(my_string file_name, char **table_names)
goto end;
}
}
- end_key_cache();
+ end_key_cache(dflt_keycache,1);
delete_tree(&tree);
VOID(end_io_cache(&cache));
VOID(my_close(file,MYF(0)));
@@ -667,7 +668,7 @@ static int examine_log(my_string file_name, char **table_names)
llstr(isamlog_filepos,llbuff)));
fflush(stderr);
end:
- end_key_cache();
+ end_key_cache(dflt_keycache,1);
delete_tree(&tree);
VOID(end_io_cache(&cache));
VOID(my_close(file,MYF(0)));
diff --git a/myisam/rt_index.c b/myisam/rt_index.c
index f02d6121eb5..8b877d2e65c 100644
--- a/myisam/rt_index.c
+++ b/myisam/rt_index.c
@@ -60,7 +60,7 @@ static int rtree_find_req(MI_INFO *info, MI_KEYDEF *keyinfo, uint search_flag, u
my_errno = HA_ERR_OUT_OF_MEM;
return -1;
}
- if (!_mi_fetch_keypage(info, keyinfo, page, page_buf, 0))
+ if (!_mi_fetch_keypage(info, keyinfo, page, DFLT_INIT_HITS, page_buf, 0))
goto err1;
nod_flag = mi_test_if_nod(page_buf);
@@ -257,7 +257,7 @@ static int rtree_get_req(MI_INFO *info, MI_KEYDEF *keyinfo, uint key_length,
if (!(page_buf = (uchar*)my_alloca((uint)keyinfo->block_length)))
return -1;
- if (!_mi_fetch_keypage(info, keyinfo, page, page_buf, 0))
+ if (!_mi_fetch_keypage(info, keyinfo, page, DFLT_INIT_HITS, page_buf, 0))
goto err1;
nod_flag = mi_test_if_nod(page_buf);
@@ -429,7 +429,7 @@ static int rtree_insert_req(MI_INFO *info, MI_KEYDEF *keyinfo, uchar *key,
my_errno = HA_ERR_OUT_OF_MEM;
return -1;
}
- if (!_mi_fetch_keypage(info, keyinfo, page, page_buf, 0))
+ if (!_mi_fetch_keypage(info, keyinfo, page, DFLT_INIT_HITS, page_buf, 0))
goto err1;
nod_flag = mi_test_if_nod(page_buf);
@@ -445,7 +445,7 @@ static int rtree_insert_req(MI_INFO *info, MI_KEYDEF *keyinfo, uchar *key,
case 0: /* child was not split */
{
rtree_combine_rect(keyinfo->seg, k, key, k, key_length);
- if (_mi_write_keypage(info, keyinfo, page, page_buf))
+ if (_mi_write_keypage(info, keyinfo, page, DFLT_INIT_HITS, page_buf))
goto err1;
goto ok;
}
@@ -462,7 +462,7 @@ static int rtree_insert_req(MI_INFO *info, MI_KEYDEF *keyinfo, uchar *key,
goto err1;
res = rtree_add_key(info, keyinfo, new_key, key_length,
page_buf, new_page);
- if (_mi_write_keypage(info, keyinfo, page, page_buf))
+ if (_mi_write_keypage(info, keyinfo, page, DFLT_INIT_HITS, page_buf))
goto err1;
goto ok;
}
@@ -476,7 +476,7 @@ static int rtree_insert_req(MI_INFO *info, MI_KEYDEF *keyinfo, uchar *key,
else
{
res = rtree_add_key(info, keyinfo, key, key_length, page_buf, new_page);
- if (_mi_write_keypage(info, keyinfo, page, page_buf))
+ if (_mi_write_keypage(info, keyinfo, page, DFLT_INIT_HITS, page_buf))
goto err1;
goto ok;
}
@@ -509,12 +509,12 @@ static int rtree_insert_level(MI_INFO *info, uint keynr, uchar *key,
{
int res;
- if ((old_root = _mi_new(info, keyinfo)) == HA_OFFSET_ERROR)
+ if ((old_root = _mi_new(info, keyinfo, DFLT_INIT_HITS)) == HA_OFFSET_ERROR)
return -1;
info->buff_used = 1;
mi_putint(info->buff, 2, 0);
res = rtree_add_key(info, keyinfo, key, key_length, info->buff, NULL);
- if (_mi_write_keypage(info, keyinfo, old_root, info->buff))
+ if (_mi_write_keypage(info, keyinfo, old_root, DFLT_INIT_HITS, info->buff))
return 1;
info->s->state.key_root[keynr] = old_root;
return res;
@@ -542,7 +542,8 @@ static int rtree_insert_level(MI_INFO *info, uint keynr, uchar *key,
}
mi_putint(new_root_buf, 2, nod_flag);
- if ((new_root = _mi_new(info, keyinfo)) == HA_OFFSET_ERROR)
+ if ((new_root = _mi_new(info, keyinfo, DFLT_INIT_HITS)) ==
+ HA_OFFSET_ERROR)
goto err1;
new_key = new_root_buf + keyinfo->block_length + nod_flag;
@@ -559,7 +560,8 @@ static int rtree_insert_level(MI_INFO *info, uint keynr, uchar *key,
if (rtree_add_key(info, keyinfo, new_key, key_length, new_root_buf, NULL)
== -1)
goto err1;
- if (_mi_write_keypage(info, keyinfo, new_root, new_root_buf))
+ if (_mi_write_keypage(info, keyinfo, new_root,
+ DFLT_INIT_HITS, new_root_buf))
goto err1;
info->s->state.key_root[keynr] = new_root;
@@ -636,7 +638,7 @@ static int rtree_delete_req(MI_INFO *info, MI_KEYDEF *keyinfo, uchar *key,
my_errno = HA_ERR_OUT_OF_MEM;
return -1;
}
- if (!_mi_fetch_keypage(info, keyinfo, page, page_buf, 0))
+ if (!_mi_fetch_keypage(info, keyinfo, page, DFLT_INIT_HITS, page_buf, 0))
goto err1;
nod_flag = mi_test_if_nod(page_buf);
@@ -662,7 +664,8 @@ static int rtree_delete_req(MI_INFO *info, MI_KEYDEF *keyinfo, uchar *key,
if (rtree_set_key_mbr(info, keyinfo, k, key_length,
_mi_kpos(nod_flag, k)))
goto err1;
- if (_mi_write_keypage(info, keyinfo, page, page_buf))
+ if (_mi_write_keypage(info, keyinfo, page,
+ DFLT_INIT_HITS, page_buf))
goto err1;
}
else
@@ -672,7 +675,8 @@ static int rtree_delete_req(MI_INFO *info, MI_KEYDEF *keyinfo, uchar *key,
level + 1))
goto err1;
rtree_delete_key(info, page_buf, k, key_length, nod_flag);
- if (_mi_write_keypage(info, keyinfo, page, page_buf))
+ if (_mi_write_keypage(info, keyinfo, page,
+ DFLT_INIT_HITS, page_buf))
goto err1;
*page_size = mi_getint(page_buf);
}
@@ -686,7 +690,8 @@ static int rtree_delete_req(MI_INFO *info, MI_KEYDEF *keyinfo, uchar *key,
case 2: /* vacuous case: last key in the leaf */
{
rtree_delete_key(info, page_buf, k, key_length, nod_flag);
- if (_mi_write_keypage(info, keyinfo, page, page_buf))
+ if (_mi_write_keypage(info, keyinfo, page,
+ DFLT_INIT_HITS, page_buf))
goto err1;
*page_size = mi_getint(page_buf);
res = 0;
@@ -711,13 +716,13 @@ static int rtree_delete_req(MI_INFO *info, MI_KEYDEF *keyinfo, uchar *key,
{
/* last key in the leaf */
res = 2;
- if (_mi_dispose(info, keyinfo, page))
+ if (_mi_dispose(info, keyinfo, page, DFLT_INIT_HITS))
goto err1;
}
else
{
res = 0;
- if (_mi_write_keypage(info, keyinfo, page, page_buf))
+ if (_mi_write_keypage(info, keyinfo, page, DFLT_INIT_HITS, page_buf))
goto err1;
}
goto ok;
@@ -783,7 +788,7 @@ int rtree_delete(MI_INFO *info, uint keynr, uchar *key, uint key_length)
goto err1;
}
if (!_mi_fetch_keypage(info, keyinfo, ReinsertList.pages[i].offs,
- page_buf, 0))
+ DFLT_INIT_HITS, page_buf, 0))
goto err1;
nod_flag = mi_test_if_nod(page_buf);
k = rt_PAGE_FIRST_KEY(page_buf, nod_flag);
@@ -798,7 +803,8 @@ int rtree_delete(MI_INFO *info, uint keynr, uchar *key, uint key_length)
}
}
my_afree((byte*)page_buf);
- if (_mi_dispose(info, keyinfo, ReinsertList.pages[i].offs))
+ if (_mi_dispose(info, keyinfo, ReinsertList.pages[i].offs,
+ DFLT_INIT_HITS))
goto err1;
}
if (ReinsertList.pages)
@@ -807,7 +813,8 @@ int rtree_delete(MI_INFO *info, uint keynr, uchar *key, uint key_length)
/* check for redundant root (not leaf, 1 child) and eliminate */
if ((old_root = info->s->state.key_root[keynr]) == HA_OFFSET_ERROR)
goto err1;
- if (!_mi_fetch_keypage(info, keyinfo, old_root, info->buff, 0))
+ if (!_mi_fetch_keypage(info, keyinfo, old_root, DFLT_INIT_HITS,
+ info->buff, 0))
goto err1;
nod_flag = mi_test_if_nod(info->buff);
page_size = mi_getint(info->buff);
@@ -816,7 +823,7 @@ int rtree_delete(MI_INFO *info, uint keynr, uchar *key, uint key_length)
{
my_off_t new_root = _mi_kpos(nod_flag,
rt_PAGE_FIRST_KEY(info->buff, nod_flag));
- if (_mi_dispose(info, keyinfo, old_root))
+ if (_mi_dispose(info, keyinfo, old_root, DFLT_INIT_HITS))
goto err1;
info->s->state.key_root[keynr] = new_root;
}
@@ -863,7 +870,7 @@ ha_rows rtree_estimate(MI_INFO *info, uint keynr, uchar *key,
return HA_POS_ERROR;
if (!(page_buf = (uchar*)my_alloca((uint)keyinfo->block_length)))
return HA_POS_ERROR;
- if (!_mi_fetch_keypage(info, keyinfo, root, page_buf, 0))
+ if (!_mi_fetch_keypage(info, keyinfo, root, DFLT_INIT_HITS, page_buf, 0))
goto err1;
nod_flag = mi_test_if_nod(page_buf);
diff --git a/myisam/rt_key.c b/myisam/rt_key.c
index dfabf7624d2..f18d13af8d8 100644
--- a/myisam/rt_key.c
+++ b/myisam/rt_key.c
@@ -88,7 +88,8 @@ int rtree_delete_key(MI_INFO *info, uchar *page_buf, uchar *key,
int rtree_set_key_mbr(MI_INFO *info, MI_KEYDEF *keyinfo, uchar *key,
uint key_length, my_off_t child_page)
{
- if (!_mi_fetch_keypage(info, keyinfo, child_page, info->buff, 0))
+ if (!_mi_fetch_keypage(info, keyinfo, child_page,
+ DFLT_INIT_HITS, info->buff, 0))
return -1;
return rtree_page_mbr(info, keyinfo->seg, info->buff, key, key_length);
diff --git a/myisam/rt_split.c b/myisam/rt_split.c
index 72a3c4887ab..41d6f8f8ccd 100644
--- a/myisam/rt_split.c
+++ b/myisam/rt_split.c
@@ -332,10 +332,12 @@ int rtree_split_page(MI_INFO *info, MI_KEYDEF *keyinfo, uchar *page, uchar *key,
mi_putint(page, 2 + n1 * full_length, nod_flag);
mi_putint(new_page, 2 + n2 * full_length, nod_flag);
- if ((*new_page_offs= _mi_new(info, keyinfo)) == HA_OFFSET_ERROR)
+ if ((*new_page_offs= _mi_new(info, keyinfo, DFLT_INIT_HITS)) ==
+ HA_OFFSET_ERROR)
err_code= -1;
else
- err_code= _mi_write_keypage(info, keyinfo, *new_page_offs, new_page);
+ err_code= _mi_write_keypage(info, keyinfo, *new_page_offs,
+ DFLT_INIT_HITS, new_page);
my_afree((byte*)new_page);
diff --git a/mysql-test/r/func_group.result b/mysql-test/r/func_group.result
index 8b7581c3cc8..2fb38ffe592 100644
--- a/mysql-test/r/func_group.result
+++ b/mysql-test/r/func_group.result
@@ -514,7 +514,7 @@ id select_type table type possible_keys key key_len ref rows Extra
explain
select min(a1) from t1 where a1 != 'KKK';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index NULL PRIMARY 3 NULL 14 Using where; Using index
+1 SIMPLE t1 range PRIMARY PRIMARY 3 NULL 14 Using where; Using index
explain
select max(a3) from t1 where a2 < 2 and a3 < 'SEA';
id select_type table type possible_keys key key_len ref rows Extra
@@ -560,7 +560,7 @@ explain
select concat(min(t1.a1),min(t2.a4)) from t1, t2 where t2.a4 <> 'AME';
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index NULL PRIMARY 3 NULL 14 Using index
-1 SIMPLE t2 index NULL k2 4 NULL 6 Using where; Using index
+1 SIMPLE t2 range k2 k2 4 NULL 6 Using where; Using index
drop table t1, t2;
CREATE TABLE t1 (a int, b int);
select count(b), sum(b), avg(b), std(b), min(b), max(b), bit_and(b), bit_or(b) from t1;
diff --git a/mysql-test/r/key_cache.result b/mysql-test/r/key_cache.result
index dd45cb51d33..79fe0599bc4 100644
--- a/mysql-test/r/key_cache.result
+++ b/mysql-test/r/key_cache.result
@@ -41,3 +41,194 @@ SELECT @@default.key_buffer_size;
ERROR 42000: You have an error in your SQL syntax. Check the manual that corresponds to your MySQL server version for the right syntax to use near 'default.key_buffer_size' at line 1
SELECT @@skr.table_type="test";
ERROR HY000: Variable 'table_type' is not a variable component (Can't be used as XXXX.variable_name)
+select @@keycache1.key_cache_block_size;
+@@keycache1.key_cache_block_size
+0
+select @@keycache1.key_buffer_size;
+@@keycache1.key_buffer_size
+0
+set global keycache1.key_cache_block_size=2048;
+select @@keycache1.key_buffer_size;
+@@keycache1.key_buffer_size
+0
+select @@keycache1.key_cache_block_size;
+@@keycache1.key_cache_block_size
+2048
+set global keycache1.key_buffer_size=1*1024*1024;
+select @@keycache1.key_buffer_size;
+@@keycache1.key_buffer_size
+1048576
+select @@keycache1.key_cache_block_size;
+@@keycache1.key_cache_block_size
+2048
+set global keycache2.key_buffer_size=4*1024*1024;
+select @@keycache2.key_buffer_size;
+@@keycache2.key_buffer_size
+4194304
+select @@keycache2.key_cache_block_size;
+@@keycache2.key_cache_block_size
+1024
+set global keycache1.key_buffer_size=0;
+select @@keycache1.key_buffer_size;
+@@keycache1.key_buffer_size
+0
+select @@keycache1.key_cache_block_size;
+@@keycache1.key_cache_block_size
+0
+select @@key_buffer_size;
+@@key_buffer_size
+2097152
+select @@key_cache_block_size;
+@@key_cache_block_size
+1024
+drop table if exists t1, t2;
+create table t1 (p int primary key, a char(10));
+create table t2 (p int primary key, i int, a char(10), key k1(i), key k2(a));
+insert into t1 values (1, 'qqqq'), (11, 'yyyy');
+insert into t2 values (1, 1, 'qqqq'), (2, 1, 'pppp'),
+(3, 1, 'yyyy'), (4, 3, 'zzzz');
+select * from t1;
+p a
+1 qqqq
+11 yyyy
+select * from t2;
+p i a
+1 1 qqqq
+2 1 pppp
+3 1 yyyy
+4 3 zzzz
+update t1 set p=2 where p=1;
+update t2 set i=2 where i=1;
+cache index t1 keys in keycache1;
+Table Op Msg_type Msg_text
+test.t1 assign_to_keycache status OK
+explain select p from t1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index NULL PRIMARY 4 NULL 2 Using index
+select p from t1;
+p
+2
+11
+explain select i from t2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 index NULL k1 5 NULL 4 Using index
+select i from t2;
+i
+2
+2
+2
+3
+explain select count(*) from t1, t2 where t1.p = t2.i;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index PRIMARY PRIMARY 4 NULL 2 Using index
+1 SIMPLE t2 ref k1 k1 5 test.t1.p 2 Using where; Using index
+select count(*) from t1, t2 where t1.p = t2.i;
+count(*)
+3
+cache index t2 keys in keycache1;
+Table Op Msg_type Msg_text
+test.t2 assign_to_keycache status OK
+update t2 set p=p+1000, i=2 where a='qqqq';
+cache index t2 keys in keycache2;
+Table Op Msg_type Msg_text
+test.t2 assign_to_keycache status OK
+insert into t2 values (2000, 3, 'yyyy');
+cache index t2 keys in keycache1;
+Table Op Msg_type Msg_text
+test.t2 assign_to_keycache status OK
+update t2 set p=3000 where a='zzzz';
+select * from t2;
+p i a
+1001 2 qqqq
+2 2 pppp
+3 2 yyyy
+3000 3 zzzz
+2000 3 yyyy
+explain select p from t2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 index NULL PRIMARY 4 NULL 5 Using index
+select p from t2;
+p
+2
+3
+1001
+2000
+3000
+explain select i from t2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 index NULL k1 5 NULL 5 Using index
+select i from t2;
+i
+2
+2
+2
+3
+3
+explain select a from t2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 index NULL k2 11 NULL 5 Using index
+select a from t2;
+a
+pppp
+qqqq
+yyyy
+yyyy
+zzzz
+select @@keycache2.key_buffer_size;
+@@keycache2.key_buffer_size
+4194304
+select @@keycache2.key_cache_block_size;
+@@keycache2.key_cache_block_size
+1024
+set global keycache2.key_buffer_size=0;
+select @@keycache2.key_buffer_size;
+@@keycache2.key_buffer_size
+0
+select @@keycache2.key_cache_block_size;
+@@keycache2.key_cache_block_size
+0
+update t2 set p=4000 where a='zzzz';
+update t1 set p=p+1;
+set global keycache1.key_buffer_size=0;
+select * from t2;
+p i a
+1001 2 qqqq
+2 2 pppp
+3 2 yyyy
+4000 3 zzzz
+2000 3 yyyy
+select p from t2;
+p
+2
+3
+1001
+2000
+4000
+explain select i from t2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 index NULL k1 5 NULL 5 Using index
+select i from t2;
+i
+2
+2
+2
+3
+3
+explain select a from t2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 index NULL k2 11 NULL 5 Using index
+select a from t2;
+a
+pppp
+qqqq
+yyyy
+yyyy
+zzzz
+select * from t1;
+p a
+3 qqqq
+12 yyyy
+select p from t1;
+p
+3
+12
diff --git a/mysql-test/r/range.result b/mysql-test/r/range.result
index 9f76676ee1a..db477fc662f 100644
--- a/mysql-test/r/range.result
+++ b/mysql-test/r/range.result
@@ -1,4 +1,4 @@
-drop table if exists t1;
+drop table if exists t1, t2;
CREATE TABLE t1 (
event_date date DEFAULT '0000-00-00' NOT NULL,
type int(11) DEFAULT '0' NOT NULL,
@@ -215,17 +215,6 @@ select count(*) from t1 where art = 'J';
count(*)
213
drop table t1;
-create table t1 ( id1 int not null, id2 int not null, idnull int null, c char(20), primary key (id1,id2));
-insert into t1 values (0,1,NULL,"aaa"), (1,1,NULL,"aaa"), (2,1,NULL,"aaa"),
-(3,1,NULL,"aaa"), (4,1,NULL,"aaa"), (5,1,NULL,"aaa"),
-(6,1,NULL,"aaa"), (7,1,NULL,"aaa"), (8,1,NULL,"aaa"),
-(9,1,NULL,"aaa"), (10,1,NULL,"aaa"), (11,1,NULL,"aaa"),
-(12,1,NULL,"aaa"), (13,1,NULL,"aaa"), (14,1,NULL,"aaa"),
-(15,1,NULL,"aaa"), (16,1,NULL,"aaa"), (17,1,NULL,"aaa"),
-(18,1,NULL,"aaa"), (19,1,NULL,"aaa"), (20,1,NULL,"aaa");
-select a.id1, b.idnull from t1 as a, t1 as b where a.id2=1 and a.id1=1 and b.id1=a.idnull order by b.id2 desc limit 1;
-id1 idnull
-drop table t1;
create table t1 (x int, y int, index(x), index(y));
insert into t1 (x) values (1),(2),(3),(4),(5),(6),(7),(8),(9);
update t1 set y=x;
@@ -286,3 +275,145 @@ id
5
9
drop table t1;
+create table t1 ( id1 int not null, id2 int not null, idnull int null, c char(20), primary key (id1,id2));
+insert into t1 values (0,1,NULL,"aaa"), (1,1,NULL,"aaa"), (2,1,NULL,"aaa"),
+(3,1,NULL,"aaa"), (4,1,NULL,"aaa"), (5,1,NULL,"aaa"),
+(6,1,NULL,"aaa"), (7,1,NULL,"aaa"), (8,1,NULL,"aaa"),
+(9,1,NULL,"aaa"), (10,1,NULL,"aaa"), (11,1,NULL,"aaa"),
+(12,1,NULL,"aaa"), (13,1,NULL,"aaa"), (14,1,NULL,"aaa"),
+(15,1,NULL,"aaa"), (16,1,NULL,"aaa"), (17,1,NULL,"aaa"),
+(18,1,NULL,"aaa"), (19,1,NULL,"aaa"), (20,1,NULL,"aaa");
+select a.id1, b.idnull from t1 as a, t1 as b where a.id2=1 and a.id1=1 and b.id1=a.idnull order by b.id2 desc limit 1;
+id1 idnull
+drop table t1;
+create table t1 (
+id int not null auto_increment,
+name char(1) not null,
+uid int not null,
+primary key (id),
+index uid_index (uid));
+create table t2 (
+id int not null auto_increment,
+name char(1) not null,
+uid int not null,
+primary key (id),
+index uid_index (uid));
+insert into t1(id, uid, name) values(1, 0, ' ');
+insert into t1(uid, name) values(0, ' ');
+insert into t2(uid, name) select uid, name from t1;
+insert into t1(uid, name) select uid, name from t2;
+insert into t2(uid, name) select uid, name from t1;
+insert into t1(uid, name) select uid, name from t2;
+insert into t2(uid, name) select uid, name from t1;
+insert into t1(uid, name) select uid, name from t2;
+insert into t2(uid, name) select uid, name from t1;
+insert into t1(uid, name) select uid, name from t2;
+insert into t2(uid, name) select uid, name from t1;
+insert into t1(uid, name) select uid, name from t2;
+insert into t2(uid, name) select uid, name from t1;
+insert into t2(uid, name) select uid, name from t1;
+insert into t2(uid, name) select uid, name from t1;
+insert into t2(uid, name) select uid, name from t1;
+insert into t1(uid, name) select uid, name from t2;
+delete from t2;
+insert into t2(uid, name) values
+(1, CHAR(64+1)),
+(2, CHAR(64+2)),
+(3, CHAR(64+3)),
+(4, CHAR(64+4)),
+(5, CHAR(64+5)),
+(6, CHAR(64+6)),
+(7, CHAR(64+7)),
+(8, CHAR(64+8)),
+(9, CHAR(64+9)),
+(10, CHAR(64+10)),
+(11, CHAR(64+11)),
+(12, CHAR(64+12)),
+(13, CHAR(64+13)),
+(14, CHAR(64+14)),
+(15, CHAR(64+15)),
+(16, CHAR(64+16)),
+(17, CHAR(64+17)),
+(18, CHAR(64+18)),
+(19, CHAR(64+19)),
+(20, CHAR(64+20)),
+(21, CHAR(64+21)),
+(22, CHAR(64+22)),
+(23, CHAR(64+23)),
+(24, CHAR(64+24)),
+(25, CHAR(64+25)),
+(26, CHAR(64+26));
+insert into t1(uid, name) select uid, name from t2;
+delete from t2;
+insert into t2(id, uid, name) select id, uid, name from t1;
+select count(*) from t1;
+count(*)
+1026
+select count(*) from t2;
+count(*)
+1026
+explain select * from t1, t2 where t1.uid=t2.uid AND t1.uid > 0;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range uid_index uid_index 4 NULL 128 Using where
+1 SIMPLE t2 ref uid_index uid_index 4 test.t1.uid 38
+explain select * from t1, t2 where t1.uid=t2.uid AND t1.uid != 0;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range uid_index uid_index 4 NULL 129 Using where
+1 SIMPLE t2 ref uid_index uid_index 4 test.t1.uid 38
+select * from t1, t2 where t1.uid=t2.uid AND t1.uid > 0;
+id name uid id name uid
+1001 A 1 1001 A 1
+1002 B 2 1002 B 2
+1003 C 3 1003 C 3
+1004 D 4 1004 D 4
+1005 E 5 1005 E 5
+1006 F 6 1006 F 6
+1007 G 7 1007 G 7
+1008 H 8 1008 H 8
+1009 I 9 1009 I 9
+1010 J 10 1010 J 10
+1011 K 11 1011 K 11
+1012 L 12 1012 L 12
+1013 M 13 1013 M 13
+1014 N 14 1014 N 14
+1015 O 15 1015 O 15
+1016 P 16 1016 P 16
+1017 Q 17 1017 Q 17
+1018 R 18 1018 R 18
+1019 S 19 1019 S 19
+1020 T 20 1020 T 20
+1021 U 21 1021 U 21
+1022 V 22 1022 V 22
+1023 W 23 1023 W 23
+1024 X 24 1024 X 24
+1025 Y 25 1025 Y 25
+1026 Z 26 1026 Z 26
+select * from t1, t2 where t1.uid=t2.uid AND t1.uid != 0;
+id name uid id name uid
+1001 A 1 1001 A 1
+1002 B 2 1002 B 2
+1003 C 3 1003 C 3
+1004 D 4 1004 D 4
+1005 E 5 1005 E 5
+1006 F 6 1006 F 6
+1007 G 7 1007 G 7
+1008 H 8 1008 H 8
+1009 I 9 1009 I 9
+1010 J 10 1010 J 10
+1011 K 11 1011 K 11
+1012 L 12 1012 L 12
+1013 M 13 1013 M 13
+1014 N 14 1014 N 14
+1015 O 15 1015 O 15
+1016 P 16 1016 P 16
+1017 Q 17 1017 Q 17
+1018 R 18 1018 R 18
+1019 S 19 1019 S 19
+1020 T 20 1020 T 20
+1021 U 21 1021 U 21
+1022 V 22 1022 V 22
+1023 W 23 1023 W 23
+1024 X 24 1024 X 24
+1025 Y 25 1025 Y 25
+1026 Z 26 1026 Z 26
+drop table t1,t2;
diff --git a/mysql-test/t/key_cache.test b/mysql-test/t/key_cache.test
index fb6b6b0027f..371d610eb5f 100644
--- a/mysql-test/t/key_cache.test
+++ b/mysql-test/t/key_cache.test
@@ -1,5 +1,5 @@
#
-# Test of key cache
+# Test of multiple key caches
#
SET @save_key_buffer=@@key_buffer_size;
@@ -40,3 +40,83 @@ SET @@global.key_buffer_size=@save_key_buffer;
SELECT @@default.key_buffer_size;
--error 1271
SELECT @@skr.table_type="test";
+
+select @@keycache1.key_cache_block_size;
+select @@keycache1.key_buffer_size;
+set global keycache1.key_cache_block_size=2048;
+select @@keycache1.key_buffer_size;
+select @@keycache1.key_cache_block_size;
+set global keycache1.key_buffer_size=1*1024*1024;
+select @@keycache1.key_buffer_size;
+select @@keycache1.key_cache_block_size;
+set global keycache2.key_buffer_size=4*1024*1024;
+select @@keycache2.key_buffer_size;
+select @@keycache2.key_cache_block_size;
+set global keycache1.key_buffer_size=0;
+select @@keycache1.key_buffer_size;
+select @@keycache1.key_cache_block_size;
+select @@key_buffer_size;
+select @@key_cache_block_size;
+
+
+--disable_warnings
+drop table if exists t1, t2;
+--enable_warnings
+
+create table t1 (p int primary key, a char(10));
+create table t2 (p int primary key, i int, a char(10), key k1(i), key k2(a));
+
+insert into t1 values (1, 'qqqq'), (11, 'yyyy');
+insert into t2 values (1, 1, 'qqqq'), (2, 1, 'pppp'),
+ (3, 1, 'yyyy'), (4, 3, 'zzzz');
+select * from t1;
+select * from t2;
+
+update t1 set p=2 where p=1;
+update t2 set i=2 where i=1;
+
+cache index t1 keys in keycache1;
+
+explain select p from t1;
+select p from t1;
+explain select i from t2;
+select i from t2;
+explain select count(*) from t1, t2 where t1.p = t2.i;
+select count(*) from t1, t2 where t1.p = t2.i;
+
+cache index t2 keys in keycache1;
+update t2 set p=p+1000, i=2 where a='qqqq';
+cache index t2 keys in keycache2;
+insert into t2 values (2000, 3, 'yyyy');
+cache index t2 keys in keycache1;
+update t2 set p=3000 where a='zzzz';
+select * from t2;
+explain select p from t2;
+select p from t2;
+explain select i from t2;
+select i from t2;
+explain select a from t2;
+select a from t2;
+
+select @@keycache2.key_buffer_size;
+select @@keycache2.key_cache_block_size;
+set global keycache2.key_buffer_size=0;
+select @@keycache2.key_buffer_size;
+select @@keycache2.key_cache_block_size;
+
+
+update t2 set p=4000 where a='zzzz';
+update t1 set p=p+1;
+
+set global keycache1.key_buffer_size=0;
+select * from t2;
+select p from t2;
+explain select i from t2;
+select i from t2;
+explain select a from t2;
+select a from t2;
+
+select * from t1;
+select p from t1;
+
+
diff --git a/mysql-test/t/range.test b/mysql-test/t/range.test
index 7bf6570b558..40ae49f8005 100644
--- a/mysql-test/t/range.test
+++ b/mysql-test/t/range.test
@@ -3,7 +3,7 @@
#
--disable_warnings
-drop table if exists t1;
+drop table if exists t1, t2;
--enable_warnings
CREATE TABLE t1 (
@@ -174,18 +174,6 @@ select count(*) from t1 where art = 'j' or art = 'J';
select count(*) from t1 where art = 'j';
select count(*) from t1 where art = 'J';
drop table t1;
-
-create table t1 ( id1 int not null, id2 int not null, idnull int null, c char(20), primary key (id1,id2));
-insert into t1 values (0,1,NULL,"aaa"), (1,1,NULL,"aaa"), (2,1,NULL,"aaa"),
- (3,1,NULL,"aaa"), (4,1,NULL,"aaa"), (5,1,NULL,"aaa"),
- (6,1,NULL,"aaa"), (7,1,NULL,"aaa"), (8,1,NULL,"aaa"),
- (9,1,NULL,"aaa"), (10,1,NULL,"aaa"), (11,1,NULL,"aaa"),
- (12,1,NULL,"aaa"), (13,1,NULL,"aaa"), (14,1,NULL,"aaa"),
- (15,1,NULL,"aaa"), (16,1,NULL,"aaa"), (17,1,NULL,"aaa"),
- (18,1,NULL,"aaa"), (19,1,NULL,"aaa"), (20,1,NULL,"aaa");
-select a.id1, b.idnull from t1 as a, t1 as b where a.id2=1 and a.id1=1 and b.id1=a.idnull order by b.id2 desc limit 1;
-drop table t1;
-
#
# BETWEEN problems
#
@@ -225,3 +213,96 @@ insert into t1 values (1),(2),(3),(4),(5),(6),(7),(8),(9);
select id from t1 where id in (2,5,9) ;
select id from t1 where id=2 or id=5 or id=9 ;
drop table t1;
+create table t1 ( id1 int not null, id2 int not null, idnull int null, c char(20), primary key (id1,id2));
+insert into t1 values (0,1,NULL,"aaa"), (1,1,NULL,"aaa"), (2,1,NULL,"aaa"),
+ (3,1,NULL,"aaa"), (4,1,NULL,"aaa"), (5,1,NULL,"aaa"),
+ (6,1,NULL,"aaa"), (7,1,NULL,"aaa"), (8,1,NULL,"aaa"),
+ (9,1,NULL,"aaa"), (10,1,NULL,"aaa"), (11,1,NULL,"aaa"),
+ (12,1,NULL,"aaa"), (13,1,NULL,"aaa"), (14,1,NULL,"aaa"),
+ (15,1,NULL,"aaa"), (16,1,NULL,"aaa"), (17,1,NULL,"aaa"),
+ (18,1,NULL,"aaa"), (19,1,NULL,"aaa"), (20,1,NULL,"aaa");
+select a.id1, b.idnull from t1 as a, t1 as b where a.id2=1 and a.id1=1 and b.id1=a.idnull order by b.id2 desc limit 1;
+drop table t1;
+
+
+#
+# Problem with optimizing !=
+#
+
+create table t1 (
+ id int not null auto_increment,
+ name char(1) not null,
+ uid int not null,
+ primary key (id),
+ index uid_index (uid));
+
+create table t2 (
+ id int not null auto_increment,
+ name char(1) not null,
+ uid int not null,
+ primary key (id),
+ index uid_index (uid));
+
+insert into t1(id, uid, name) values(1, 0, ' ');
+insert into t1(uid, name) values(0, ' ');
+
+insert into t2(uid, name) select uid, name from t1;
+insert into t1(uid, name) select uid, name from t2;
+insert into t2(uid, name) select uid, name from t1;
+insert into t1(uid, name) select uid, name from t2;
+insert into t2(uid, name) select uid, name from t1;
+insert into t1(uid, name) select uid, name from t2;
+insert into t2(uid, name) select uid, name from t1;
+insert into t1(uid, name) select uid, name from t2;
+insert into t2(uid, name) select uid, name from t1;
+insert into t1(uid, name) select uid, name from t2;
+insert into t2(uid, name) select uid, name from t1;
+insert into t2(uid, name) select uid, name from t1;
+insert into t2(uid, name) select uid, name from t1;
+insert into t2(uid, name) select uid, name from t1;
+insert into t1(uid, name) select uid, name from t2;
+
+delete from t2;
+insert into t2(uid, name) values
+ (1, CHAR(64+1)),
+ (2, CHAR(64+2)),
+ (3, CHAR(64+3)),
+ (4, CHAR(64+4)),
+ (5, CHAR(64+5)),
+ (6, CHAR(64+6)),
+ (7, CHAR(64+7)),
+ (8, CHAR(64+8)),
+ (9, CHAR(64+9)),
+ (10, CHAR(64+10)),
+ (11, CHAR(64+11)),
+ (12, CHAR(64+12)),
+ (13, CHAR(64+13)),
+ (14, CHAR(64+14)),
+ (15, CHAR(64+15)),
+ (16, CHAR(64+16)),
+ (17, CHAR(64+17)),
+ (18, CHAR(64+18)),
+ (19, CHAR(64+19)),
+ (20, CHAR(64+20)),
+ (21, CHAR(64+21)),
+ (22, CHAR(64+22)),
+ (23, CHAR(64+23)),
+ (24, CHAR(64+24)),
+ (25, CHAR(64+25)),
+ (26, CHAR(64+26));
+
+insert into t1(uid, name) select uid, name from t2;
+
+delete from t2;
+insert into t2(id, uid, name) select id, uid, name from t1;
+
+select count(*) from t1;
+select count(*) from t2;
+
+explain select * from t1, t2 where t1.uid=t2.uid AND t1.uid > 0;
+explain select * from t1, t2 where t1.uid=t2.uid AND t1.uid != 0;
+
+select * from t1, t2 where t1.uid=t2.uid AND t1.uid > 0;
+select * from t1, t2 where t1.uid=t2.uid AND t1.uid != 0;
+
+drop table t1,t2;
diff --git a/mysys/mf_keycache.c b/mysys/mf_keycache.c
index 3276044fc2f..2f5a704234d 100644
--- a/mysys/mf_keycache.c
+++ b/mysys/mf_keycache.c
@@ -135,49 +135,61 @@ typedef struct st_block_link
uint offset; /* beginning of modified data in the buffer */
uint length; /* end of data in the buffer */
uint status; /* state of the block */
+ uint hits_left; /* number of hits left until promotion */
+ ulonglong last_hit_time; /* timestamp of the last hit */
KEYCACHE_CONDVAR *condvar; /* condition variable for 'no readers' event */
} BLOCK_LINK;
-static int flush_all_key_blocks();
-static void test_key_cache(const char *where, my_bool lock);
-
-uint key_cache_block_size= /* size of the page buffer of a cache block */
- DEFAULT_KEYCACHE_BLOCK_SIZE;
-static uint key_cache_shift;
+KEY_CACHE_VAR dflt_key_cache_var=
+{
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+KEY_CACHE_HANDLE *dflt_keycache= &dflt_key_cache_var.cache;
#define CHANGED_BLOCKS_HASH 128 /* must be power of 2 */
#define FLUSH_CACHE 2000 /* sort this many blocks at once */
-static KEYCACHE_WQUEUE
- waiting_for_hash_link; /* queue of requests waiting for a free hash link */
-static KEYCACHE_WQUEUE
- waiting_for_block; /* queue of requests waiting for a free block */
-
-static HASH_LINK **my_hash_root; /* arr. of entries into hash table buckets */
-static uint my_hash_entries; /* max number of entries in the hash table */
-static HASH_LINK *my_hash_link_root; /* memory for hash table links */
-static int my_hash_links; /* max number of hash links */
-static int my_hash_links_used; /* number of hash links currently used */
-static HASH_LINK *my_free_hash_list; /* list of free hash links */
-static BLOCK_LINK *my_block_root; /* memory for block links */
-static int my_disk_blocks; /* max number of blocks in the cache */
-static byte HUGE_PTR *my_block_mem; /* memory for block buffers */
-static BLOCK_LINK *my_used_last; /* ptr to the last block of the LRU chain */
-ulong my_blocks_used, /* number of currently used blocks */
- my_blocks_changed; /* number of currently dirty blocks */
+typedef struct st_key_cache
+{
+ KEY_CACHE_VAR *env; /* pointer to key cache variables (if any) */
+ my_bool key_cache_inited;
+ uint key_cache_shift;
+ ulong key_cache_mem_size; /* specified size of the cache memory */
+ uint key_cache_block_size; /* size of the page buffer of a cache block */
+ ulong min_warm_blocks; /* min number of warm blocks; */
+ ulong age_threshold; /* age threshold for hot blocks */
+ ulonglong keycache_time; /* total number of block link operations */
+ uint hash_entries; /* max number of entries in the hash table */
+ int hash_links; /* max number of hash links */
+ int hash_links_used; /* number of hash links currently used */
+ int disk_blocks; /* max number of blocks in the cache */
+ ulong blocks_used; /* number of currently used blocks */
+ ulong blocks_changed; /* number of currently dirty blocks */
+ ulong warm_blocks; /* number of blocks in warm sub-chain */
#if defined(KEYCACHE_DEBUG)
-static
-ulong my_blocks_available; /* number of blocks available in the LRU chain */
-#endif /* defined(KEYCACHE_DEBUG) */
-ulong my_cache_w_requests, my_cache_write, /* counters */
- my_cache_r_requests, my_cache_read; /* for statistics */
-static BLOCK_LINK
- *changed_blocks[CHANGED_BLOCKS_HASH]; /* hash table for file dirty blocks */
-static BLOCK_LINK
- *file_blocks[CHANGED_BLOCKS_HASH]; /* hash table for other file blocks */
- /* that are not free */
+ long blocks_available; /* number of blocks available in the LRU chain */
+#endif
+ HASH_LINK **hash_root; /* arr. of entries into hash table buckets */
+ HASH_LINK *hash_link_root; /* memory for hash table links */
+ HASH_LINK *free_hash_list; /* list of free hash links */
+ BLOCK_LINK *block_root; /* memory for block links */
+ byte HUGE_PTR *block_mem; /* memory for block buffers */
+ BLOCK_LINK *used_last; /* ptr to the last block of the LRU chain */
+ BLOCK_LINK *used_ins; /* ptr to the insertion block in LRU chain */
+ pthread_mutex_t cache_lock; /* to lock access to the cache structure */
+ KEYCACHE_WQUEUE waiting_for_hash_link; /* waiting for a free hash link */
+ KEYCACHE_WQUEUE waiting_for_block; /* requests waiting for a free block */
+ BLOCK_LINK *changed_blocks[CHANGED_BLOCKS_HASH]; /* hash for dirty file bl.*/
+ BLOCK_LINK *file_blocks[CHANGED_BLOCKS_HASH]; /* hash for other file bl.*/
+} KEY_CACHE;
+
+static int flush_all_key_blocks(KEY_CACHE_HANDLE keycache);
+static void test_key_cache(KEY_CACHE *keycache,
+ const char *where, my_bool lock);
+
#define KEYCACHE_HASH(f, pos) \
- (((ulong) ((pos) >> key_cache_shift)+(ulong) (f)) & (my_hash_entries-1))
+(((ulong) ((pos) >> keycache->key_cache_shift)+ \
+ (ulong) (f)) & (keycache->hash_entries-1))
#define FILE_HASH(f) ((uint) (f) & (CHANGED_BLOCKS_HASH-1))
#define DEFAULT_KEYCACHE_DEBUG_LOG "keycache_debug.log"
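
Editorial note, not part of the patch: the caches named keycache1 and keycache2 in the tests above are presumably each backed by their own KEY_CACHE_VAR and handle, declared the same way as the default pair dflt_key_cache_var/dflt_keycache above. A hypothetical second cache (the extra_* names are invented) would look like this:

/* a second cache: its own parameter/statistics block plus a handle */
KEY_CACHE_VAR extra_key_cache_var=
{
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
KEY_CACHE_HANDLE *extra_keycache= &extra_key_cache_var.cache;
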
@@ -231,9 +243,9 @@ static long keycache_thread_id;
#endif /* defined(KEYCACHE_DEBUG) || !defined(DBUG_OFF) */
#define BLOCK_NUMBER(b) \
- ((uint) (((char*)(b) - (char *) my_block_root) / sizeof(BLOCK_LINK)))
+ ((uint) (((char*)(b)-(char *) keycache->block_root)/sizeof(BLOCK_LINK)))
#define HASH_LINK_NUMBER(h) \
- ((uint) (((char*)(h) - (char *) my_hash_link_root) / sizeof(HASH_LINK)))
+ ((uint) (((char*)(h)-(char *) keycache->hash_link_root)/sizeof(HASH_LINK)))
#if (defined(KEYCACHE_TIMEOUT) && !defined(__WIN__)) || defined(KEYCACHE_DEBUG)
static int keycache_pthread_cond_wait(pthread_cond_t *cond,
@@ -267,193 +279,378 @@ static uint next_power(uint value)
/*
- Initialize the key cache,
- return number of blocks in it
+ Initialize a key cache
+
+ SYNOPSIS
+    init_key_cache()
+ pkeycache in/out pointer to the key cache handle
+ key_cache_block_size size of blocks to keep cached data
+ use_mem total memory to use for the key cache
+ env ref to other parameters of the key cache, if any
+
+ RETURN VALUE
+ number of blocks in the key cache, if successful,
+ 0 - otherwise.
+
+ NOTES.
+    If pkeycache points to an undefined handle (NULL), a new KEY_CACHE
+    data structure is created and passed back through *pkeycache as a new
+    key cache handle; otherwise *pkeycache is treated as the handle of an
+    existing key cache that is to be rebuilt with new blocks.
+ It's assumed that no two threads call this function simultaneously
+ referring to the same key cache handle.
+
*/
-int init_key_cache(ulong use_mem)
+int init_key_cache(KEY_CACHE_HANDLE *pkeycache, uint key_cache_block_size,
+ ulong use_mem, KEY_CACHE_VAR *env)
{
uint blocks, hash_links, length;
int error;
+ KEY_CACHE *keycache;
DBUG_ENTER("init_key_cache");
+ if (!(keycache= (KEY_CACHE *) *pkeycache) &&
+ !(keycache= (KEY_CACHE *) my_malloc(sizeof(KEY_CACHE),
+ MYF(MY_ZEROFILL))))
+ DBUG_RETURN(0);
+
+ keycache->env= env;
+
KEYCACHE_DEBUG_OPEN;
- if (key_cache_inited && my_disk_blocks > 0)
+ if (keycache->key_cache_inited && keycache->disk_blocks > 0)
{
DBUG_PRINT("warning",("key cache already in use"));
DBUG_RETURN(0);
}
- if (! key_cache_inited)
+ if (env && ! keycache->key_cache_inited)
{
- key_cache_inited=TRUE;
- my_disk_blocks= -1;
- key_cache_shift=my_bit_log2(key_cache_block_size);
- DBUG_PRINT("info",("key_cache_block_size: %u",
- key_cache_block_size));
+ env->cache_w_requests= env->cache_r_requests= 0;
+ env->cache_read= env->cache_write=0;
}
- my_cache_w_requests= my_cache_r_requests= my_cache_read= my_cache_write=0;
-
- my_block_mem=NULL;
- my_block_root=NULL;
-
- blocks= (uint) (use_mem/(sizeof(BLOCK_LINK)+2*sizeof(HASH_LINK)+
- sizeof(HASH_LINK*)*5/4+key_cache_block_size));
+ if (! keycache->key_cache_inited)
+ {
+ keycache->key_cache_inited= TRUE;
+ keycache->disk_blocks= -1;
+ pthread_mutex_init(&keycache->cache_lock, MY_MUTEX_INIT_FAST);
+ keycache->key_cache_shift= my_bit_log2(key_cache_block_size);
+ keycache->key_cache_mem_size= use_mem;
+ keycache->key_cache_block_size= key_cache_block_size;
+ DBUG_PRINT("info", ("key_cache_block_size: %u",
+ key_cache_block_size));
+ }
+
+ /*
+    These are safety deallocations: normally this function is called only
+    after end_key_cache, which has already freed this memory.
+ */
+ if (keycache->block_mem)
+ my_free_lock((gptr) keycache->block_mem, MYF(0));
+ keycache->block_mem= NULL;
+ if (keycache->block_root)
+ my_free((gptr) keycache->block_root, MYF(0));
+ keycache->block_root= NULL;
+
+ blocks= (uint) (use_mem / (sizeof(BLOCK_LINK) + 2 * sizeof(HASH_LINK) +
+ sizeof(HASH_LINK*) * 5/4 + key_cache_block_size));
/* It doesn't make sense to have too few blocks (less than 8) */
- if (blocks >= 8 && my_disk_blocks < 0)
+ if (blocks >= 8 && keycache->disk_blocks < 0)
{
- for (;;)
+ for ( ; ; )
{
/* Set my_hash_entries to the next bigger 2 power */
- if ((my_hash_entries=next_power(blocks)) < blocks*5/4)
- my_hash_entries<<=1;
- hash_links=2*blocks;
+ if ((keycache->hash_entries= next_power(blocks)) < blocks * 5/4)
+ keycache->hash_entries<<= 1;
+ hash_links= 2 * blocks;
#if defined(MAX_THREADS)
if (hash_links < MAX_THREADS + blocks - 1)
- hash_links=MAX_THREADS + blocks - 1;
+ hash_links= MAX_THREADS + blocks - 1;
#endif
- while ((length=(ALIGN_SIZE(blocks*sizeof(BLOCK_LINK))+
- ALIGN_SIZE(hash_links*sizeof(HASH_LINK))+
- ALIGN_SIZE(sizeof(HASH_LINK*)*my_hash_entries)))+
- ((ulong) blocks << key_cache_shift) > use_mem)
+ while ((length= (ALIGN_SIZE(blocks * sizeof(BLOCK_LINK)) +
+ ALIGN_SIZE(hash_links * sizeof(HASH_LINK)) +
+ ALIGN_SIZE(sizeof(HASH_LINK*) *
+ keycache->hash_entries))) +
+ ((ulong) blocks << keycache->key_cache_shift) > use_mem)
blocks--;
/* Allocate memory for cache page buffers */
- if ((my_block_mem=my_malloc_lock((ulong) blocks*key_cache_block_size,
- MYF(0))))
+ if ((keycache->block_mem=
+ my_malloc_lock((ulong) blocks * keycache->key_cache_block_size,
+ MYF(0))))
{
/*
Allocate memory for blocks, hash_links and hash entries;
For each block 2 hash links are allocated
*/
- if ((my_block_root=(BLOCK_LINK*) my_malloc((uint) length,MYF(0))))
+ if ((keycache->block_root= (BLOCK_LINK*) my_malloc((uint) length,
+ MYF(0))))
break;
- my_free_lock(my_block_mem,MYF(0));
+ my_free_lock(keycache->block_mem, MYF(0));
}
if (blocks < 8)
{
- my_errno=ENOMEM;
+ my_errno= ENOMEM;
goto err;
}
- blocks=blocks/4*3;
+ blocks= blocks / 4*3;
}
- my_disk_blocks=(int) blocks;
- my_hash_links=hash_links;
- my_hash_root= (HASH_LINK**) ((char*) my_block_root +
- ALIGN_SIZE(blocks*sizeof(BLOCK_LINK)));
- my_hash_link_root= (HASH_LINK*) ((char*) my_hash_root +
- ALIGN_SIZE((sizeof(HASH_LINK*) *
- my_hash_entries)));
- bzero((byte*) my_block_root, my_disk_blocks*sizeof(BLOCK_LINK));
- bzero((byte*) my_hash_root, my_hash_entries*sizeof(HASH_LINK*));
- bzero((byte*) my_hash_link_root, my_hash_links*sizeof(HASH_LINK));
- my_hash_links_used=0;
- my_free_hash_list=NULL;
- my_blocks_used= my_blocks_changed=0;
+ keycache->disk_blocks= (int) blocks;
+ keycache->hash_links= hash_links;
+ keycache->hash_root= (HASH_LINK**) ((char*) keycache->block_root +
+ ALIGN_SIZE(blocks*sizeof(BLOCK_LINK)));
+ keycache->hash_link_root= (HASH_LINK*) ((char*) keycache->hash_root +
+ ALIGN_SIZE((sizeof(HASH_LINK*) *
+ keycache->hash_entries)));
+ bzero((byte*) keycache->block_root,
+ keycache->disk_blocks * sizeof(BLOCK_LINK));
+ bzero((byte*) keycache->hash_root,
+ keycache->hash_entries * sizeof(HASH_LINK*));
+ bzero((byte*) keycache->hash_link_root,
+ keycache->hash_links * sizeof(HASH_LINK));
+ keycache->hash_links_used= 0;
+ keycache->free_hash_list= NULL;
+ keycache->blocks_used= keycache->blocks_changed= 0;
+ if (env)
+ env->blocks_used= env->blocks_changed= 0;
#if defined(KEYCACHE_DEBUG)
- my_blocks_available=0;
+ keycache->blocks_available=0;
#endif
/* The LRU chain is empty after initialization */
- my_used_last=NULL;
-
- waiting_for_hash_link.last_thread=NULL;
- waiting_for_block.last_thread=NULL;
+ keycache->used_last= NULL;
+ keycache->used_ins= NULL;
+ keycache->keycache_time= 0;
+ keycache->warm_blocks= 0;
+ keycache->min_warm_blocks= env && env->division_limit ?
+ blocks * env->division_limit / 100 + 1 :
+ blocks;
+ keycache->age_threshold= env && env->age_threshold ?
+ blocks * env->age_threshold / 100 :
+ blocks;
+
+ keycache->waiting_for_hash_link.last_thread= NULL;
+ keycache->waiting_for_block.last_thread= NULL;
DBUG_PRINT("exit",
("disk_blocks: %d block_root: %lx hash_entries: %d hash_root: %lx \
hash_links: %d hash_link_root %lx",
- my_disk_blocks, my_block_root, my_hash_entries, my_hash_root,
- my_hash_links, my_hash_link_root));
+ keycache->disk_blocks, keycache->block_root,
+ keycache->hash_entries, keycache->hash_root,
+ keycache->hash_links, keycache->hash_link_root));
}
- bzero((gptr) changed_blocks,sizeof(changed_blocks[0])*CHANGED_BLOCKS_HASH);
- bzero((gptr) file_blocks,sizeof(file_blocks[0])*CHANGED_BLOCKS_HASH);
-
+ bzero((gptr) keycache->changed_blocks,
+ sizeof(keycache->changed_blocks[0]) * CHANGED_BLOCKS_HASH);
+ bzero((gptr) keycache->file_blocks,
+ sizeof(keycache->file_blocks[0]) * CHANGED_BLOCKS_HASH);
+
+ if (env)
+ env->blocks= keycache->disk_blocks > 0 ? keycache->disk_blocks : 0;
+ *pkeycache= keycache;
DBUG_RETURN((int) blocks);
err:
- error=my_errno;
- if (my_block_mem)
- my_free_lock((gptr) my_block_mem,MYF(0));
- if (my_block_mem)
- my_free((gptr) my_block_root,MYF(0));
- my_errno=error;
+ error= my_errno;
+ keycache->disk_blocks= 0;
+ if (env)
+ env->blocks= 0;
+ if (keycache->block_mem)
+ {
+ my_free_lock((gptr) keycache->block_mem, MYF(0));
+ keycache->block_mem= NULL;
+ }
+ if (keycache->block_root)
+ {
+ my_free((gptr) keycache->block_root, MYF(0));
+ keycache->block_root= NULL;
+ }
+ my_errno= error;
DBUG_RETURN(0);
}
/*
- Resize the key cache
+ Resize a key cache
+
+ SYNOPSIS
+ resize_key_cache()
+ pkeycache in/out pointer to the key cache handle
+ key_cache_block_size size of blocks to keep cached data
+ use_mem total memory to use for the new key cache
+
+ RETURN VALUE
+ number of blocks in the key cache, if successful,
+ 0 - otherwise.
+
+ NOTES.
+ The function first compares the memory size and the block size parameters
+    with the corresponding parameters of the key cache referred to by
+    *pkeycache. If they differ, the function frees the memory allocated
+ for the old key cache blocks by calling the end_key_cache function
+ and then rebuilds the key cache with new blocks by calling init_key_cache.
*/
-int resize_key_cache(ulong use_mem)
+
+int resize_key_cache(KEY_CACHE_HANDLE *pkeycache, uint key_cache_block_size,
+ ulong use_mem)
{
int blocks;
- keycache_pthread_mutex_lock(&THR_LOCK_keycache);
- if (flush_all_key_blocks())
+ KEY_CACHE *keycache= *pkeycache;
+
+ if (key_cache_block_size == keycache->key_cache_block_size &&
+ use_mem == keycache->key_cache_mem_size)
+ return keycache->disk_blocks;
+
+ keycache_pthread_mutex_lock(&keycache->cache_lock);
+ if (flush_all_key_blocks(keycache))
{
/* TODO: if this happens, we should write a warning in the log file ! */
- keycache_pthread_mutex_unlock(&THR_LOCK_keycache);
+ keycache_pthread_mutex_unlock(&keycache->cache_lock);
return 0;
}
- end_key_cache();
+ keycache_pthread_mutex_unlock(&keycache->cache_lock);
+ end_key_cache(pkeycache, 0);
/* the following will work even if memory is 0 */
- blocks=init_key_cache(use_mem);
- keycache_pthread_mutex_unlock(&THR_LOCK_keycache);
+ blocks=init_key_cache(pkeycache, key_cache_block_size, use_mem,
+ keycache->env);
return blocks;
}
/*
+ Change the key cache parameters
+
+ SYNOPSIS
+ change_key_cache_param()
+ keycache the key cache handle
+
+ RETURN VALUE
+ none
+
+ NOTES.
+ Presently the function resets the key cache parameters
+    concerning the midpoint insertion strategy - division_limit and
+    age_threshold. The corresponding values are passed through
+ the keycache->env structure.
+*/
+
+void change_key_cache_param(KEY_CACHE_HANDLE keycache)
+{
+ KEY_CACHE_VAR *env= keycache->env;
+ DBUG_ENTER("change_key_cache_param");
+
+ if (!env)
+ return;
+ if (env->division_limit)
+ keycache->min_warm_blocks= keycache->disk_blocks *
+ env->division_limit / 100 + 1;
+ if (env->age_threshold)
+ keycache->age_threshold= keycache->disk_blocks *
+ env->age_threshold / 100;
+ DBUG_VOID_RETURN;
+}
+
+
+/*
Remove key_cache from memory
+
+ SYNOPSIS
+ end_key_cache()
+ pkeycache in/out pointer to the key cache handle
+ cleanup <-> the key cache data structure is freed as well
+
+ RETURN VALUE
+ none
+
+ NOTES.
+    If the cleanup parameter is TRUE, the data structure and all associated
+    elements are freed completely and NULL is assigned to *pkeycache.
+ Otherwise only memory used by the key cache blocks is freed.
*/
-void end_key_cache(void)
+void end_key_cache(KEY_CACHE_HANDLE *pkeycache, my_bool cleanup)
{
+ KEY_CACHE *keycache= *pkeycache;
+ KEY_CACHE_VAR *env= keycache->env;
DBUG_ENTER("end_key_cache");
- if (my_disk_blocks > 0)
+ if (keycache->disk_blocks > 0)
{
- if (my_block_mem)
+ if (keycache->block_mem)
{
- my_free_lock((gptr) my_block_mem,MYF(0));
- my_free((gptr) my_block_root,MYF(0));
+ my_free_lock((gptr) keycache->block_mem, MYF(0));
+ keycache->block_mem= NULL;
+ my_free((gptr) keycache->block_root, MYF(0));
+ keycache->block_root= NULL;
}
- my_disk_blocks= -1;
+ keycache->disk_blocks= -1;
}
KEYCACHE_DEBUG_CLOSE;
- key_cache_inited=0;
- DBUG_PRINT("status",
- ("used: %d changed: %d w_requests: %ld \
- writes: %ld r_requests: %ld reads: %ld",
- my_blocks_used, my_blocks_changed, my_cache_w_requests,
- my_cache_write, my_cache_r_requests, my_cache_read));
+ keycache->key_cache_inited= 0;
+ if (env)
+ DBUG_PRINT("status",
+ ("used: %d changed: %d w_requests: %ld \
+ writes: %ld r_requests: %ld reads: %ld",
+ env->blocks_used, env->blocks_changed,
+ env->cache_w_requests, env->cache_write,
+ env->cache_r_requests, env->cache_read));
+ if (cleanup)
+ {
+ pthread_mutex_destroy(&keycache->cache_lock);
+ my_free((gptr) *pkeycache, MYF(0));
+ *pkeycache= NULL;
+ }
DBUG_VOID_RETURN;
} /* end_key_cache */
/*
- Link a thread into double-linked queue of waiting threads
+ Link a thread into double-linked queue of waiting threads.
+
+ SYNOPSIS
+ link_into_queue()
+ wqueue pointer to the queue structure
+ thread pointer to the thread to be added to the queue
+
+ RETURN VALUE
+ none
+
+ NOTES.
+    The queue is represented by a circular list of the thread structures.
+ The list is double-linked of the type (**prev,*next), accessed by
+ a pointer to the last element.
*/
static inline void link_into_queue(KEYCACHE_WQUEUE *wqueue,
struct st_my_thread_var *thread)
{
struct st_my_thread_var *last;
- if (! (last=wqueue->last_thread))
+ if (! (last= wqueue->last_thread))
{
/* Queue is empty */
- thread->next=thread;
- thread->prev=&thread->next;
+ thread->next= thread;
+ thread->prev= &thread->next;
}
else
{
- thread->prev=last->next->prev;
- last->next->prev=&thread->next;
- thread->next=last->next;
- last->next=thread;
+ thread->prev= last->next->prev;
+ last->next->prev= &thread->next;
+ thread->next= last->next;
+ last->next= thread;
}
- wqueue->last_thread=thread;
+ wqueue->last_thread= thread;
}
/*
Unlink a thread from double-linked queue of waiting threads
+
+ SYNOPSIS
+ unlink_from_queue()
+ wqueue pointer to the queue structure
+ thread pointer to the thread to be removed from the queue
+
+ RETURN VALUE
+ none
+
+ NOTES.
+ See NOTES for link_into_queue
*/
static inline void unlink_from_queue(KEYCACHE_WQUEUE *wqueue,
@@ -462,40 +659,66 @@ static inline void unlink_from_queue(KEYCACHE_WQUEUE *wqueue,
KEYCACHE_DBUG_PRINT("unlink_from_queue", ("thread %ld", thread->id));
if (thread->next == thread)
/* The queue contains only one member */
- wqueue->last_thread=NULL;
+ wqueue->last_thread= NULL;
else
{
- thread->next->prev=thread->prev;
+ thread->next->prev= thread->prev;
*thread->prev=thread->next;
if (wqueue->last_thread == thread)
- wqueue->last_thread=STRUCT_PTR(struct st_my_thread_var, next,
- thread->prev);
+ wqueue->last_thread= STRUCT_PTR(struct st_my_thread_var, next,
+ thread->prev);
}
- thread->next=NULL;
+ thread->next= NULL;
}
/*
Add a thread to single-linked queue of waiting threads
+
+ SYNOPSIS
+ add_to_queue()
+ wqueue pointer to the queue structure
+ thread pointer to the thread to be added to the queue
+
+ RETURN VALUE
+ none
+
+ NOTES.
+    The queue is represented by a circular list of the thread structures.
+ The list is single-linked of the type (*next), accessed by a pointer
+ to the last element.
*/
static inline void add_to_queue(KEYCACHE_WQUEUE *wqueue,
struct st_my_thread_var *thread)
{
struct st_my_thread_var *last;
- if (! (last=wqueue->last_thread))
- thread->next=thread;
+ if (! (last= wqueue->last_thread))
+ thread->next= thread;
else
{
- thread->next=last->next;
- last->next=thread;
+ thread->next= last->next;
+ last->next= thread;
}
- wqueue->last_thread=thread;
+ wqueue->last_thread= thread;
}
/*
Remove all threads from queue signaling them to proceed
+
+ SYNOPSIS
+    release_queue()
+      wqueue     pointer to the queue structure
+
+ RETURN VALUE
+ none
+
+ NOTES.
+    See NOTES for add_to_queue.
+    When removed from the queue, each thread is signaled via the condition
+    variable thread->suspend.
*/
static void release_queue(KEYCACHE_WQUEUE *wqueue)
@@ -509,10 +732,10 @@ static void release_queue(KEYCACHE_WQUEUE *wqueue)
keycache_pthread_cond_signal(&thread->suspend);
KEYCACHE_DBUG_PRINT("release_queue: signal", ("thread %ld", thread->id));
next=thread->next;
- thread->next=NULL;
+ thread->next= NULL;
}
while (thread != last);
- wqueue->last_thread=NULL;
+ wqueue->last_thread= NULL;
}
@@ -523,8 +746,8 @@ static void release_queue(KEYCACHE_WQUEUE *wqueue)
static inline void unlink_changed(BLOCK_LINK *block)
{
if (block->next_changed)
- block->next_changed->prev_changed=block->prev_changed;
- *block->prev_changed=block->next_changed;
+ block->next_changed->prev_changed= block->prev_changed;
+ *block->prev_changed= block->next_changed;
}
@@ -534,10 +757,10 @@ static inline void unlink_changed(BLOCK_LINK *block)
static inline void link_changed(BLOCK_LINK *block, BLOCK_LINK **phead)
{
- block->prev_changed=phead;
- if ((block->next_changed=*phead))
+ block->prev_changed= phead;
+ if ((block->next_changed= *phead))
(*phead)->prev_changed= &block->next_changed;
- *phead=block;
+ *phead= block;
}
@@ -546,16 +769,18 @@ static inline void link_changed(BLOCK_LINK *block, BLOCK_LINK **phead)
and link it to the chain of clean blocks for the specified file
*/
-static void link_to_file_list(BLOCK_LINK *block,int file,
- my_bool unlink)
+static void link_to_file_list(KEY_CACHE *keycache,
+ BLOCK_LINK *block, int file, my_bool unlink)
{
if (unlink)
unlink_changed(block);
- link_changed(block,&file_blocks[FILE_HASH(file)]);
+ link_changed(block, &keycache->file_blocks[FILE_HASH(file)]);
if (block->status & BLOCK_CHANGED)
{
- block->status&=~BLOCK_CHANGED;
- my_blocks_changed--;
+ block->status&= ~BLOCK_CHANGED;
+ keycache->blocks_changed--;
+ if (keycache->env)
+ keycache->env->blocks_changed--;
}
}
@@ -565,33 +790,74 @@ static void link_to_file_list(BLOCK_LINK *block,int file,
file and link it to the chain of dirty blocks for this file
*/
-static inline void link_to_changed_list(BLOCK_LINK *block)
+static inline void link_to_changed_list(KEY_CACHE *keycache,
+ BLOCK_LINK *block)
{
unlink_changed(block);
- link_changed(block,&changed_blocks[FILE_HASH(block->hash_link->file)]);
+ link_changed(block,
+ &keycache->changed_blocks[FILE_HASH(block->hash_link->file)]);
block->status|=BLOCK_CHANGED;
- my_blocks_changed++;
+ keycache->blocks_changed++;
+ if (keycache->env)
+ keycache->env->blocks_changed++;
}
/*
- Link a block to the LRU chain at the beginning or at the end
+ Link a block to the LRU chain at the beginning or at the end of
+ one of two parts.
+
+ SYNOPSIS
+ link_block()
+ keycache pointer to a key cache data structure
+ block pointer to the block to link to the LRU chain
+ hot <-> to link the block into the hot subchain
+ at_end <-> to link the block at the end of the subchain
+
+ RETURN VALUE
+ none
+
+ NOTES.
+    The LRU chain is represented by a circular list of block structures.
+    The list is double-linked of the type (**prev,*next).
+ The LRU chain is divided into two parts - hot and warm.
+ There are two pointers to access the last blocks of these two
+ parts. The beginning of the warm part follows right after the
+ end of the hot part.
+ Only blocks of the warm part can be used for replacement.
+ The first block from the beginning of this subchain is always
+    taken for eviction (keycache->used_last->next).
+
+ LRU chain: +------+ H O T +------+
+ +----| end |----...<----| beg |----+
+ | +------+last +------+ |
+ v<-link in latest hot (new end) |
+ | link in latest warm (new end)->^
+ | +------+ W A R M +------+ |
+ +----| beg |---->...----| end |----+
+ +------+ +------+ins
+ first for eviction
*/
-static void link_block(BLOCK_LINK *block, my_bool at_end)
-{
+static void link_block(KEY_CACHE *keycache, BLOCK_LINK *block, my_bool hot,
+ my_bool at_end)
+{
+ BLOCK_LINK *ins;
+ BLOCK_LINK **pins;
+
KEYCACHE_DBUG_ASSERT(! (block->hash_link && block->hash_link->requests));
- if (waiting_for_block.last_thread) {
- /* Signal that in the LRU chain an available block has appeared */
- struct st_my_thread_var *last_thread=waiting_for_block.last_thread;
- struct st_my_thread_var *first_thread=last_thread->next;
- struct st_my_thread_var *next_thread=first_thread;
+ if (!hot && keycache->waiting_for_block.last_thread) {
+ /* Signal that in the LRU warm sub-chain an available block has appeared */
+ struct st_my_thread_var *last_thread=
+ keycache->waiting_for_block.last_thread;
+ struct st_my_thread_var *first_thread= last_thread->next;
+ struct st_my_thread_var *next_thread= first_thread;
HASH_LINK *hash_link= (HASH_LINK *) first_thread->opt_info;
struct st_my_thread_var *thread;
do
{
- thread=next_thread;
- next_thread=thread->next;
+ thread= next_thread;
+ next_thread= thread->next;
/*
We notify about the event all threads that ask
for the same page as the first thread in the queue
@@ -599,74 +865,90 @@ static void link_block(BLOCK_LINK *block, my_bool at_end)
if ((HASH_LINK *) thread->opt_info == hash_link)
{
keycache_pthread_cond_signal(&thread->suspend);
- unlink_from_queue(&waiting_for_block, thread);
+ unlink_from_queue(&keycache->waiting_for_block, thread);
block->requests++;
}
}
while (thread != last_thread);
- hash_link->block=block;
+ hash_link->block= block;
KEYCACHE_THREAD_TRACE("link_block: after signaling");
#if defined(KEYCACHE_DEBUG)
KEYCACHE_DBUG_PRINT("link_block",
("linked,unlinked block %u status=%x #requests=%u #available=%u",
- BLOCK_NUMBER(block),block->status,
- block->requests, my_blocks_available));
+ BLOCK_NUMBER(block), block->status,
+ block->requests, keycache->blocks_available));
#endif
return;
}
- if (my_used_last)
+ pins= hot ? &keycache->used_ins : &keycache->used_last;
+ ins= *pins;
+ if (ins)
{
- my_used_last->next_used->prev_used=&block->next_used;
- block->next_used= my_used_last->next_used;
- block->prev_used= &my_used_last->next_used;
- my_used_last->next_used=block;
+ ins->next_used->prev_used= &block->next_used;
+ block->next_used= ins->next_used;
+ block->prev_used= &ins->next_used;
+ ins->next_used= block;
if (at_end)
- my_used_last=block;
+ *pins= block;
}
else
{
/* The LRU chain is empty */
- my_used_last=block->next_used=block;
- block->prev_used=&block->next_used;
+ keycache->used_last= keycache->used_ins= block->next_used= block;
+ block->prev_used= &block->next_used;
}
KEYCACHE_THREAD_TRACE("link_block");
#if defined(KEYCACHE_DEBUG)
- my_blocks_available++;
+ keycache->blocks_available++;
KEYCACHE_DBUG_PRINT("link_block",
("linked block %u:%1u status=%x #requests=%u #available=%u",
- BLOCK_NUMBER(block),at_end,block->status,
- block->requests, my_blocks_available));
- KEYCACHE_DBUG_ASSERT(my_blocks_available <= my_blocks_used);
+ BLOCK_NUMBER(block), at_end, block->status,
+ block->requests, keycache->blocks_available));
+ KEYCACHE_DBUG_ASSERT((ulong) keycache->blocks_available <=
+ keycache->blocks_used);
#endif
}
/*
Unlink a block from the LRU chain
+
+ SYNOPSIS
+ unlink_block()
+ keycache pointer to a key cache data structure
+ block pointer to the block to unlink from the LRU chain
+
+ RETURN VALUE
+ none
+
+ NOTES.
+ See NOTES for link_block
*/
-static void unlink_block(BLOCK_LINK *block)
+static void unlink_block(KEY_CACHE *keycache, BLOCK_LINK *block)
{
if (block->next_used == block)
/* The list contains only one member */
- my_used_last=NULL;
+ keycache->used_last= keycache->used_ins= NULL;
else
{
- block->next_used->prev_used=block->prev_used;
- *block->prev_used=block->next_used;
- if (my_used_last == block)
- my_used_last=STRUCT_PTR(BLOCK_LINK, next_used, block->prev_used);
- }
- block->next_used=NULL;
+ block->next_used->prev_used= block->prev_used;
+ *block->prev_used= block->next_used;
+ if (keycache->used_last == block)
+ keycache->used_last= STRUCT_PTR(BLOCK_LINK, next_used, block->prev_used);
+ if (keycache->used_ins == block)
+ keycache->used_ins=STRUCT_PTR(BLOCK_LINK, next_used, block->prev_used);
+ }
+ block->next_used= NULL;
KEYCACHE_THREAD_TRACE("unlink_block");
#if defined(KEYCACHE_DEBUG)
- my_blocks_available--;
+ keycache->blocks_available--;
KEYCACHE_DBUG_PRINT("unlink_block",
("unlinked block %u status=%x #requests=%u #available=%u",
- BLOCK_NUMBER(block),block->status,
- block->requests, my_blocks_available));
- KEYCACHE_DBUG_ASSERT(my_blocks_available >= 0);
+ BLOCK_NUMBER(block), block->status,
+ block->requests, keycache->blocks_available));
+ KEYCACHE_DBUG_ASSERT(keycache->blocks_available >= 0);
#endif
}
@@ -674,11 +956,11 @@ static void unlink_block(BLOCK_LINK *block)
/*
Register requests for a block
*/
-static void reg_requests(BLOCK_LINK *block, int count)
+static void reg_requests(KEY_CACHE *keycache, BLOCK_LINK *block, int count)
{
if (! block->requests)
/* First request for the block unlinks it */
- unlink_block(block);
+ unlink_block(keycache, block);
block->requests+=count;
}
@@ -686,12 +968,62 @@ static void reg_requests(BLOCK_LINK *block, int count)
/*
Unregister request for a block
linking it to the LRU chain if it's the last request
+
+ SYNOPSIS
+
+    unreg_request()
+ keycache pointer to a key cache data structure
+ block pointer to the block to link to the LRU chain
+ at_end <-> to link the block at the end of the LRU chain
+
+ RETURN VALUE
+ none
+
+ NOTES.
+    Every linking to the LRU chain decrements a special per-block counter
+    by one (if it's positive). If the at_end parameter is TRUE the block is
+    added either at the end of the warm sub-chain or at the end of the hot
+    sub-chain. It is added to the hot sub-chain if its counter is zero and
+    the number of blocks in the warm sub-chain is above some low limit
+    (determined by the division_limit parameter). Otherwise the block is
+    added to the warm sub-chain. If the at_end parameter is FALSE the block
+    is always added at the beginning of the warm sub-chain.
+    Thus a warm block can be promoted to the hot sub-chain when its counter
+    becomes zero for the first time.
+    At the same time the block at the very beginning of the hot sub-chain
+    might be moved to the beginning of the warm sub-chain if it stays
+    untouched for too long (this time is determined by the age_threshold
+    parameter).
*/
-static inline void unreg_request(BLOCK_LINK *block, int at_end)
+static inline void unreg_request(KEY_CACHE *keycache,
+ BLOCK_LINK *block, int at_end)
{
if (! --block->requests)
- link_block(block, (my_bool)at_end);
+ {
+ my_bool hot;
+ if (block->hits_left)
+ block->hits_left--;
+ hot= !block->hits_left && at_end &&
+ keycache->warm_blocks > keycache->min_warm_blocks;
+ if (hot)
+ {
+ keycache->warm_blocks--;
+ KEYCACHE_DBUG_PRINT("unreg_request", ("#warm_blocks=%u",
+ keycache->warm_blocks));
+ }
+ link_block(keycache, block, hot, (my_bool)at_end);
+ block->last_hit_time= keycache->keycache_time;
+ if (++keycache->keycache_time - keycache->used_ins->last_hit_time >
+ keycache->age_threshold)
+ {
+ block= keycache->used_ins;
+ unlink_block(keycache, block);
+ link_block(keycache, block, 0, 0);
+ keycache->warm_blocks++;
+ KEYCACHE_DBUG_PRINT("unreg_request", ("#warm_blocks=%u",
+ keycache->warm_blocks));
+ }
+ }
}
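
To make the promotion rule described in the NOTES above concrete, here is a minimal standalone sketch of the decision unreg_request now makes when the last request for a block is released. The field names (hits_left, warm_blocks, min_warm_blocks) mirror the patch; the toy structs and the main() driver are purely illustrative and not part of the patch.

/* Illustrative sketch of the warm/hot promotion decision (not from the patch) */
#include <stdio.h>

struct toy_block { unsigned hits_left; };
struct toy_cache { unsigned long warm_blocks, min_warm_blocks; };

/* Returns 1 if the block should be linked into the hot sub-chain */
static int becomes_hot(struct toy_cache *kc, struct toy_block *b, int at_end)
{
  if (b->hits_left)
    b->hits_left--;                  /* one more trip through the LRU chain */
  return !b->hits_left && at_end &&
         kc->warm_blocks > kc->min_warm_blocks;
}

int main(void)
{
  struct toy_cache kc= { 100, 10 };
  struct toy_block b= { 3 };         /* promoted after three trips */
  int i;
  for (i= 0; i < 4; i++)
    printf("trip %d: hot=%d\n", i, becomes_hot(&kc, &b, 1));
  return 0;
}
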
/*
@@ -709,14 +1041,15 @@ static inline void remove_reader(BLOCK_LINK *block)
Wait until the last reader of the page in block
signals on its termination
*/
-static inline void wait_for_readers(BLOCK_LINK *block)
+
+static inline void wait_for_readers(KEY_CACHE *keycache, BLOCK_LINK *block)
{
struct st_my_thread_var *thread=my_thread_var;
while (block->hash_link->requests)
{
- block->condvar=&thread->suspend;
- keycache_pthread_cond_wait(&thread->suspend,&THR_LOCK_keycache);
- block->condvar=NULL;
+ block->condvar= &thread->suspend;
+ keycache_pthread_cond_wait(&thread->suspend, &keycache->cache_lock);
+ block->condvar= NULL;
}
}
@@ -728,10 +1061,10 @@ static inline void wait_for_readers(BLOCK_LINK *block)
static inline void link_hash(HASH_LINK **start, HASH_LINK *hash_link)
{
if (*start)
- (*start)->prev=&hash_link->next;
- hash_link->next=*start;
- hash_link->prev=start;
- *start=hash_link;
+ (*start)->prev= &hash_link->next;
+ hash_link->next= *start;
+ hash_link->prev= start;
+ *start= hash_link;
}
@@ -739,31 +1072,32 @@ static inline void link_hash(HASH_LINK **start, HASH_LINK *hash_link)
Remove a hash link from the hash table
*/
-static void unlink_hash(HASH_LINK *hash_link)
+static void unlink_hash(KEY_CACHE *keycache, HASH_LINK *hash_link)
{
KEYCACHE_DBUG_PRINT("unlink_hash", ("file %u, filepos %lu #requests=%u",
(uint) hash_link->file,(ulong) hash_link->diskpos, hash_link->requests));
KEYCACHE_DBUG_ASSERT(hash_link->requests == 0);
- if ((*hash_link->prev=hash_link->next))
- hash_link->next->prev=hash_link->prev;
- hash_link->block=NULL;
- if (waiting_for_hash_link.last_thread)
+ if ((*hash_link->prev= hash_link->next))
+ hash_link->next->prev= hash_link->prev;
+ hash_link->block= NULL;
+ if (keycache->waiting_for_hash_link.last_thread)
{
/* Signal that A free hash link appeared */
- struct st_my_thread_var *last_thread=waiting_for_hash_link.last_thread;
- struct st_my_thread_var *first_thread=last_thread->next;
- struct st_my_thread_var *next_thread=first_thread;
+ struct st_my_thread_var *last_thread=
+ keycache->waiting_for_hash_link.last_thread;
+ struct st_my_thread_var *first_thread= last_thread->next;
+ struct st_my_thread_var *next_thread= first_thread;
KEYCACHE_PAGE *first_page= (KEYCACHE_PAGE *) (first_thread->opt_info);
struct st_my_thread_var *thread;
- hash_link->file=first_page->file;
- hash_link->diskpos=first_page->filepos;
+ hash_link->file= first_page->file;
+ hash_link->diskpos= first_page->filepos;
do
{
KEYCACHE_PAGE *page;
- thread=next_thread;
+ thread= next_thread;
page= (KEYCACHE_PAGE *) thread->opt_info;
- next_thread=thread->next;
+ next_thread= thread->next;
/*
We notify about the event all threads that ask
for the same page as the first thread in the queue
@@ -771,16 +1105,17 @@ static void unlink_hash(HASH_LINK *hash_link)
if (page->file == hash_link->file && page->filepos == hash_link->diskpos)
{
keycache_pthread_cond_signal(&thread->suspend);
- unlink_from_queue(&waiting_for_hash_link, thread);
+ unlink_from_queue(&keycache->waiting_for_hash_link, thread);
}
}
while (thread != last_thread);
- link_hash(&my_hash_root[KEYCACHE_HASH(hash_link->file,
- hash_link->diskpos)], hash_link);
+ link_hash(&keycache->hash_root[KEYCACHE_HASH(hash_link->file,
+ hash_link->diskpos)],
+ hash_link);
return;
}
- hash_link->next= my_free_hash_list;
- my_free_hash_list=hash_link;
+ hash_link->next= keycache->free_hash_list;
+ keycache->free_hash_list= hash_link;
}
@@ -788,7 +1123,8 @@ static void unlink_hash(HASH_LINK *hash_link)
Get the hash link for a page
*/
-static HASH_LINK *get_hash_link(int file, my_off_t filepos)
+static HASH_LINK *get_hash_link(KEY_CACHE *keycache,
+ int file, my_off_t filepos)
{
reg1 HASH_LINK *hash_link, **start;
KEYCACHE_PAGE page;
@@ -805,9 +1141,9 @@ restart:
start contains the head of the bucket list,
hash_link points to the first member of the list
*/
- hash_link= *(start= &my_hash_root[KEYCACHE_HASH(file, filepos)]);
+ hash_link= *(start= &keycache->hash_root[KEYCACHE_HASH(file, filepos)]);
#if defined(KEYCACHE_DEBUG)
- cnt=0;
+ cnt= 0;
#endif
/* Look for an element for the pair (file, filepos) in the bucket chain */
while (hash_link &&
@@ -816,45 +1152,47 @@ restart:
hash_link= hash_link->next;
#if defined(KEYCACHE_DEBUG)
cnt++;
- if (! (cnt <= my_hash_links_used))
+ if (! (cnt <= keycache->hash_links_used))
{
int i;
- for (i=0, hash_link=*start ;
- i < cnt ; i++, hash_link=hash_link->next)
+ for (i=0, hash_link= *start ;
+ i < cnt ; i++, hash_link= hash_link->next)
{
KEYCACHE_DBUG_PRINT("get_hash_link", ("file %u, filepos %lu",
(uint) hash_link->file,(ulong) hash_link->diskpos));
}
}
- KEYCACHE_DBUG_ASSERT(cnt <= my_hash_links_used);
+ KEYCACHE_DBUG_ASSERT(cnt <= keycache->hash_links_used);
#endif
}
if (! hash_link)
{
/* There is no hash link in the hash table for the pair (file, filepos) */
- if (my_free_hash_list)
+ if (keycache->free_hash_list)
{
- hash_link= my_free_hash_list;
- my_free_hash_list=hash_link->next;
+ hash_link= keycache->free_hash_list;
+ keycache->free_hash_list= hash_link->next;
}
- else if (my_hash_links_used < my_hash_links)
+ else if (keycache->hash_links_used < keycache->hash_links)
{
- hash_link= &my_hash_link_root[my_hash_links_used++];
+ hash_link= &keycache->hash_link_root[keycache->hash_links_used++];
}
else
{
/* Wait for a free hash link */
- struct st_my_thread_var *thread=my_thread_var;
+ struct st_my_thread_var *thread= my_thread_var;
KEYCACHE_DBUG_PRINT("get_hash_link", ("waiting"));
- page.file=file; page.filepos=filepos;
+ page.file= file;
+ page.filepos= filepos;
thread->opt_info= (void *) &page;
- link_into_queue(&waiting_for_hash_link, thread);
- keycache_pthread_cond_wait(&thread->suspend,&THR_LOCK_keycache);
- thread->opt_info=NULL;
+ link_into_queue(&keycache->waiting_for_hash_link, thread);
+ keycache_pthread_cond_wait(&thread->suspend,
+ &keycache->cache_lock);
+ thread->opt_info= NULL;
goto restart;
}
- hash_link->file=file;
- hash_link->diskpos=filepos;
+ hash_link->file= file;
+ hash_link->diskpos= filepos;
link_hash(start, hash_link);
}
/* Register the request for the page */
@@ -867,35 +1205,68 @@ restart:
/*
Get a block for the file page requested by a keycache read/write operation;
If the page is not in the cache return a free block, if there is none
- return the lru block after saving its buffer if the page is dirty
+ return the lru block after saving its buffer if the page is dirty.
+
+ SYNOPSIS
+
+ find_key_block()
+ keycache pointer to a key cache data structure
+ file handler for the file to read page from
+ filepos position of the page in the file
+      init_hits_left      how to initialize the block counter for the page
+ wrmode <-> get for writing
+ page_st out {PAGE_READ,PAGE_TO_BE_READ,PAGE_WAIT_TO_BE_READ}
+
+ RETURN VALUE
+ Pointer to the found block if successful, 0 - otherwise
+
+    NOTES.
+      For the page from the file positioned at filepos the function checks
+      whether the page is in the key cache specified by the first parameter.
+      If this is the case it immediately returns the block.
+      If not, the function first chooses a block for this page. If there are
+      no unused blocks in the key cache yet, the function takes the block
+      at the very beginning of the warm sub-chain. If that block is dirty,
+      its page is saved to disk before the pointer to the block is returned.
+      The function returns one of the following values in the page_st
+      parameter:
+        PAGE_READ            - if the page is already in the block,
+        PAGE_TO_BE_READ      - if it is yet to be read by the current thread,
+        PAGE_WAIT_TO_BE_READ - if it is to be read by another thread.
+      If an error occurs the BLOCK_ERROR bit is set in the block status.
+      It might happen that there are no blocks in the LRU chain (in the warm
+      part) - all blocks are unlinked for some read/write operations. Then
+      the function waits until one of these operations links a block back.
*/
-static BLOCK_LINK *find_key_block(int file, my_off_t filepos,
+static BLOCK_LINK *find_key_block(KEY_CACHE *keycache,
+ File file, my_off_t filepos,
+ int init_hits_left,
int wrmode, int *page_st)
{
HASH_LINK *hash_link;
BLOCK_LINK *block;
- int error=0;
+ int error= 0;
int page_status;
DBUG_ENTER("find_key_block");
KEYCACHE_THREAD_TRACE("find_key_block:begin");
DBUG_PRINT("enter", ("file %u, filepos %lu, wrmode %lu",
- (uint) file,(ulong) filepos,(uint) wrmode));
+ (uint) file, (ulong) filepos, (uint) wrmode));
KEYCACHE_DBUG_PRINT("find_key_block", ("file %u, filepos %lu, wrmode %lu",
- (uint) file,(ulong) filepos,(uint) wrmode));
+ (uint) file, (ulong) filepos, (uint) wrmode));
#if !defined(DBUG_OFF) && defined(EXTRA_DEBUG)
- DBUG_EXECUTE("check_keycache2",test_key_cache("start of find_key_block",0););
+ DBUG_EXECUTE("check_keycache2",
+ test_key_cache(keycache, "start of find_key_block", 0););
#endif
restart:
/* Find the hash link for the requested page (file, filepos) */
- hash_link=get_hash_link(file, filepos);
+ hash_link= get_hash_link(keycache, file, filepos);
- page_status=-1;
- if ((block=hash_link->block) &&
+ page_status= -1;
+ if ((block= hash_link->block) &&
block->hash_link == hash_link && (block->status & BLOCK_READ))
- page_status=PAGE_READ;
+ page_status= PAGE_READ;
if (page_status == PAGE_READ && (block->status & BLOCK_IN_SWITCH))
{
@@ -907,20 +1278,21 @@ restart:
all others are to be suspended, then resubmitted
*/
if (!wrmode && !(block->status & BLOCK_REASSIGNED))
- reg_requests(block,1);
+ reg_requests(keycache, block, 1);
else
{
hash_link->requests--;
KEYCACHE_DBUG_PRINT("find_key_block",
("request waiting for old page to be saved"));
{
- struct st_my_thread_var *thread=my_thread_var;
+ struct st_my_thread_var *thread= my_thread_var;
/* Put the request into the queue of those waiting for the old page */
add_to_queue(&block->wqueue[COND_FOR_SAVED], thread);
/* Wait until the request can be resubmitted */
do
{
- keycache_pthread_cond_wait(&thread->suspend, &THR_LOCK_keycache);
+ keycache_pthread_cond_wait(&thread->suspend,
+ &keycache->cache_lock);
}
while(thread->next);
}
@@ -936,23 +1308,29 @@ restart:
if (! block)
{
/* No block is assigned for the page yet */
- if (my_blocks_used < (uint) my_disk_blocks)
+ if (keycache->blocks_used < (uint) keycache->disk_blocks)
{
/* There are some never used blocks, take first of them */
- hash_link->block=block= &my_block_root[my_blocks_used];
- block->buffer=ADD_TO_PTR(my_block_mem,
- ((ulong) my_blocks_used*key_cache_block_size),
- byte*);
- block->status=0;
- block->length=0;
- block->offset=key_cache_block_size;
- block->requests=1;
- my_blocks_used++;
- link_to_file_list(block, file, 0);
- block->hash_link=hash_link;
- page_status=PAGE_TO_BE_READ;
+ hash_link->block= block= &keycache->block_root[keycache->blocks_used];
+ block->buffer= ADD_TO_PTR(keycache->block_mem,
+ ((ulong) keycache->blocks_used*
+ keycache->key_cache_block_size),
+ byte*);
+ block->status= 0;
+ block->length= 0;
+ block->offset= keycache->key_cache_block_size;
+ block->requests= 1;
+ keycache->blocks_used++;
+ if (keycache->env)
+ keycache->env->blocks_used++;
+ keycache->warm_blocks++;
+ block->hits_left= init_hits_left;
+ block->last_hit_time= 0;
+ link_to_file_list(keycache, block, file, 0);
+ block->hash_link= hash_link;
+ page_status= PAGE_TO_BE_READ;
KEYCACHE_DBUG_PRINT("find_key_block",
- ("got never used block %u",BLOCK_NUMBER(block)));
+ ("got never used block %u", BLOCK_NUMBER(block)));
}
else
{
@@ -963,58 +1341,62 @@ restart:
all of them must get the same block
*/
- if (! my_used_last)
+ if (! keycache->used_last)
{
- struct st_my_thread_var *thread=my_thread_var;
- thread->opt_info=(void *) hash_link;
- link_into_queue(&waiting_for_block, thread);
+ struct st_my_thread_var *thread= my_thread_var;
+ thread->opt_info= (void *) hash_link;
+ link_into_queue(&keycache->waiting_for_block, thread);
do
{
- keycache_pthread_cond_wait(&thread->suspend,&THR_LOCK_keycache);
+ keycache_pthread_cond_wait(&thread->suspend,
+ &keycache->cache_lock);
}
while (thread->next);
- thread->opt_info=NULL;
+ thread->opt_info= NULL;
}
- block=hash_link->block;
+ block= hash_link->block;
if (! block)
{
/*
Take the first block from the LRU chain
unlinking it from the chain
*/
- block= my_used_last->next_used;
- reg_requests(block,1);
- hash_link->block=block;
+ block= keycache->used_last->next_used;
+ block->hits_left= init_hits_left;
+ block->last_hit_time= 0;
+ reg_requests(keycache, block,1);
+ hash_link->block= block;
}
if (block->hash_link != hash_link &&
! (block->status & BLOCK_IN_SWITCH) )
{
/* this is a primary request for a new page */
- block->status|=BLOCK_IN_SWITCH;
+ block->status|= BLOCK_IN_SWITCH;
KEYCACHE_DBUG_PRINT("find_key_block",
- ("got block %u for new page",BLOCK_NUMBER(block)));
+ ("got block %u for new page", BLOCK_NUMBER(block)));
if (block->status & BLOCK_CHANGED)
{
/* The block contains a dirty page - push it out of the cache */
- KEYCACHE_DBUG_PRINT("find_key_block",("block is dirty"));
+ KEYCACHE_DBUG_PRINT("find_key_block", ("block is dirty"));
- keycache_pthread_mutex_unlock(&THR_LOCK_keycache);
+ keycache_pthread_mutex_unlock(&keycache->cache_lock);
/*
The call is thread safe because only the current
thread might change the block->hash_link value
*/
- error=my_pwrite(block->hash_link->file,block->buffer,
- block->length,block->hash_link->diskpos,
+ error=my_pwrite(block->hash_link->file, block->buffer,
+ block->length, block->hash_link->diskpos,
MYF(MY_NABP | MY_WAIT_IF_FULL));
- keycache_pthread_mutex_lock(&THR_LOCK_keycache);
- my_cache_write++;
+ keycache_pthread_mutex_lock(&keycache->cache_lock);
+ if (keycache->env)
+ keycache->env->cache_write++;
}
- block->status|=BLOCK_REASSIGNED;
+ block->status|= BLOCK_REASSIGNED;
if (block->hash_link)
{
/*
@@ -1023,20 +1405,21 @@ restart:
(we could have avoided this waiting, if we had read
a page in the cache in a sweep, without yielding control)
*/
- wait_for_readers(block);
+ wait_for_readers(keycache, block);
/* Remove the hash link for this page from the hash table */
- unlink_hash(block->hash_link);
+ unlink_hash(keycache, block->hash_link);
/* All pending requests for this page must be resubmitted */
if (block->wqueue[COND_FOR_SAVED].last_thread)
release_queue(&block->wqueue[COND_FOR_SAVED]);
}
- link_to_file_list(block, file, (my_bool)(block->hash_link ? 1 : 0));
- block->status=error? BLOCK_ERROR : 0;
- block->length=0;
- block->offset=key_cache_block_size;
- block->hash_link=hash_link;
- page_status=PAGE_TO_BE_READ;
+ link_to_file_list(keycache, block, file,
+ (my_bool)(block->hash_link ? 1 : 0));
+ block->status= error? BLOCK_ERROR : 0;
+ block->length= 0;
+ block->offset= keycache->key_cache_block_size;
+ block->hash_link= hash_link;
+ page_status= PAGE_TO_BE_READ;
KEYCACHE_DBUG_ASSERT(block->hash_link->block == block);
KEYCACHE_DBUG_ASSERT(hash_link->block->hash_link == hash_link);
@@ -1044,17 +1427,17 @@ restart:
else
{
/* This is for secondary requests for a new page only */
- page_status = block->hash_link == hash_link &&
- (block->status & BLOCK_READ) ?
- PAGE_READ : PAGE_WAIT_TO_BE_READ;
+ page_status= block->hash_link == hash_link &&
+ (block->status & BLOCK_READ) ?
+ PAGE_READ : PAGE_WAIT_TO_BE_READ;
}
}
-
- my_cache_read++;
+ if (keycache->env)
+ keycache->env->cache_read++;
}
else
{
- reg_requests(block,1);
+ reg_requests(keycache, block, 1);
page_status = block->hash_link == hash_link &&
(block->status & BLOCK_READ) ?
PAGE_READ : PAGE_WAIT_TO_BE_READ;
@@ -1068,7 +1451,8 @@ restart:
(uint) file,(ulong) filepos,(uint) page_status));
#if !defined(DBUG_OFF) && defined(EXTRA_DEBUG)
- DBUG_EXECUTE("check_keycache2",test_key_cache("end of find_key_block",0););
+ DBUG_EXECUTE("check_keycache2",
+ test_key_cache(keycache, "end of find_key_block",0););
#endif
KEYCACHE_THREAD_TRACE("find_key_block:end");
DBUG_RETURN(block);
@@ -1076,17 +1460,36 @@ restart:
/*
- Read into a key cache block buffer from disk;
- do not to report error when the size of successfully read
- portion is less than read_length, but not less than min_length
+ Read into a key cache block buffer from disk.
+
+ SYNOPSIS
+
+ read_block()
+ keycache pointer to a key cache data structure
+      block               block into whose buffer the data is to be read
+      read_length         size of data to be read
+      min_length          at least this much data must be read
+ primary <-> the current thread will read the data
+
+ RETURN VALUE
+ None
+
+    NOTES.
+      The function either reads the page data from the file into the block
+      buffer, or waits until another thread reads it. Which page to read is
+      determined by the block parameter - its hash link refers to the page.
+      If an error occurs the BLOCK_ERROR bit is set in the block status.
+      We do not report an error when the size of the successfully read
+      portion is less than read_length, but not less than min_length.
*/
-static void read_block(BLOCK_LINK *block, uint read_length,
+static void read_block(KEY_CACHE *keycache,
+ BLOCK_LINK *block, uint read_length,
uint min_length, my_bool primary)
{
uint got_length;
- /* On entry THR_LOCK_keycache is locked */
+ /* On entry cache_lock is locked */
KEYCACHE_THREAD_TRACE("read_block");
if (primary)
@@ -1100,16 +1503,16 @@ static void read_block(BLOCK_LINK *block, uint read_length,
("page to be read by primary request"));
/* Page is not in buffer yet, is to be read from disk */
- keycache_pthread_mutex_unlock(&THR_LOCK_keycache);
- got_length=my_pread(block->hash_link->file,block->buffer,
- read_length,block->hash_link->diskpos,MYF(0));
- keycache_pthread_mutex_lock(&THR_LOCK_keycache);
+ keycache_pthread_mutex_unlock(&keycache->cache_lock);
+ got_length= my_pread(block->hash_link->file, block->buffer,
+ read_length, block->hash_link->diskpos, MYF(0));
+ keycache_pthread_mutex_lock(&keycache->cache_lock);
if (got_length < min_length)
- block->status|=BLOCK_ERROR;
+ block->status|= BLOCK_ERROR;
else
{
- block->status=BLOCK_READ;
- block->length=got_length;
+ block->status= BLOCK_READ;
+ block->length= got_length;
}
KEYCACHE_DBUG_PRINT("read_block",
("primary request: new page in cache"));
@@ -1126,12 +1529,13 @@ static void read_block(BLOCK_LINK *block, uint read_length,
KEYCACHE_DBUG_PRINT("read_block",
("secondary request waiting for new page to be read"));
{
- struct st_my_thread_var *thread=my_thread_var;
+ struct st_my_thread_var *thread= my_thread_var;
/* Put the request into a queue and wait until it can be processed */
- add_to_queue(&block->wqueue[COND_FOR_REQUESTED],thread);
+ add_to_queue(&block->wqueue[COND_FOR_REQUESTED], thread);
do
{
- keycache_pthread_cond_wait(&thread->suspend,&THR_LOCK_keycache);
+ keycache_pthread_cond_wait(&thread->suspend,
+ &keycache->cache_lock);
}
while (thread->next);
}
@@ -1143,34 +1547,55 @@ static void read_block(BLOCK_LINK *block, uint read_length,
/*
Read a block of data from a cached file into a buffer;
- if return_buffer is set then the cache buffer is returned if
- it can be used;
- filepos must be a multiple of 'block_length', but it doesn't
- have to be a multiple of key_cache_block_size;
- returns adress from where data is read
+
+ SYNOPSIS
+
+ key_cache_read()
+ keycache pointer to a key cache data structure
+ file handler for the file for the block of data to be read
+ filepos position of the block of data in the file
+ level determines the weight of the data
+ buff buffer to where the data must be placed
+ length length of the buffer
+ block_length length of the block in the key cache buffer
+ return_buffer return pointer to the key cache buffer with the data
+
+ RETURN VALUE
+      Returns the address the data is placed at if successful, 0 - otherwise.
+
+ NOTES.
+ The function ensures that a block of data of size length from file
+ positioned at filepos is in the buffers for some key cache blocks.
+ Then the function either copies the data into the buffer buff, or,
+ if return_buffer is TRUE, it just returns the pointer to the key cache
+ buffer with the data.
+ Filepos must be a multiple of 'block_length', but it doesn't
+ have to be a multiple of key_cache_block_size;
*/
-byte *key_cache_read(File file, my_off_t filepos, byte *buff, uint length,
+byte *key_cache_read(KEY_CACHE_HANDLE keycache,
+ File file, my_off_t filepos, int level,
+ byte *buff, uint length,
uint block_length __attribute__((unused)),
int return_buffer __attribute__((unused)))
{
int error=0;
DBUG_ENTER("key_cache_read");
DBUG_PRINT("enter", ("file %u, filepos %lu, length %u",
- (uint) file,(ulong) filepos,length));
+ (uint) file, (ulong) filepos, length));
- if (my_disk_blocks > 0)
+ if (keycache->disk_blocks > 0)
{
/* Key cache is used */
reg1 BLOCK_LINK *block;
- uint offset= (uint) (filepos & (key_cache_block_size-1));
- byte *start=buff;
+ uint offset= (uint) (filepos & (keycache->key_cache_block_size-1));
+ byte *start= buff;
uint read_length;
uint status;
int page_st;
#ifndef THREAD
- if (block_length > key_cache_block_size || offset)
+ if (block_length > keycache->key_cache_block_size || offset)
return_buffer=0;
#endif
@@ -1178,16 +1603,18 @@ byte *key_cache_read(File file, my_off_t filepos, byte *buff, uint length,
filepos-= offset;
do
{
- read_length= length > key_cache_block_size ?
- key_cache_block_size : length;
+ read_length= length > keycache->key_cache_block_size ?
+ keycache->key_cache_block_size : length;
KEYCACHE_DBUG_ASSERT(read_length > 0);
- keycache_pthread_mutex_lock(&THR_LOCK_keycache);
- my_cache_r_requests++;
- block=find_key_block(file,filepos,0,&page_st);
+ keycache_pthread_mutex_lock(&keycache->cache_lock);
+ if (keycache->env)
+ keycache->env->cache_r_requests++;
+ block=find_key_block(keycache, file, filepos, level, 0, &page_st);
if (block->status != BLOCK_ERROR && page_st != PAGE_READ)
{
/* The requested page is to be read into the block buffer */
- read_block(block,key_cache_block_size,read_length+offset,
+ read_block(keycache, block,
+ keycache->key_cache_block_size, read_length+offset,
(my_bool)(page_st == PAGE_TO_BE_READ));
}
else if (! (block->status & BLOCK_ERROR) &&
@@ -1198,28 +1625,28 @@ byte *key_cache_read(File file, my_off_t filepos, byte *buff, uint length,
this could only happen if we are using a file with
small key blocks and are trying to read outside the file
*/
- my_errno=-1;
- block->status|=BLOCK_ERROR;
+ my_errno= -1;
+ block->status|= BLOCK_ERROR;
}
- if (! ((status=block->status) & BLOCK_ERROR))
+ if (! ((status= block->status) & BLOCK_ERROR))
{
#ifndef THREAD
if (! return_buffer)
#endif
{
#if !defined(SERIALIZED_READ_FROM_CACHE)
- keycache_pthread_mutex_unlock(&THR_LOCK_keycache);
+ keycache_pthread_mutex_unlock(&keycache->cache_lock);
#endif
/* Copy data from the cache buffer */
if (!(read_length & 511))
- bmove512(buff,block->buffer+offset,read_length);
+ bmove512(buff, block->buffer+offset, read_length);
else
- memcpy(buff,block->buffer+offset,(size_t) read_length);
+ memcpy(buff, block->buffer+offset, (size_t) read_length);
#if !defined(SERIALIZED_READ_FROM_CACHE)
- keycache_pthread_mutex_lock(&THR_LOCK_keycache);
+ keycache_pthread_mutex_lock(&keycache->cache_lock);
#endif
}
}
@@ -1229,9 +1656,9 @@ byte *key_cache_read(File file, my_off_t filepos, byte *buff, uint length,
Link the block into the LRU chain
if it's the last submitted request for the block
*/
- unreg_request(block,1);
+ unreg_request(keycache, block, 1);
- keycache_pthread_mutex_unlock(&THR_LOCK_keycache);
+ keycache_pthread_mutex_unlock(&keycache->cache_lock);
if (status & BLOCK_ERROR)
DBUG_RETURN((byte *) 0);
@@ -1241,19 +1668,24 @@ byte *key_cache_read(File file, my_off_t filepos, byte *buff, uint length,
return (block->buffer);
#endif
- buff+=read_length;
- filepos+=read_length;
- offset=0;
+ buff+= read_length;
+ filepos+= read_length;
+ offset= 0;
} while ((length-= read_length));
DBUG_RETURN(start);
}
/* Key cache is not used */
- statistic_increment(my_cache_r_requests,&THR_LOCK_keycache);
- statistic_increment(my_cache_read,&THR_LOCK_keycache);
- if (my_pread(file,(byte*) buff,length,filepos,MYF(MY_NABP)))
- error=1;
+ if (keycache->env)
+ {
+ statistic_increment(keycache->env->cache_r_requests,
+ &keycache->cache_lock);
+ statistic_increment(keycache->env->cache_read,
+ &keycache->cache_lock);
+ }
+ if (my_pread(file, (byte*) buff, length, filepos, MYF(MY_NABP)))
+ error= 1;
DBUG_RETURN(error? (byte*) 0 : buff);
}
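
For orientation, callers of the rewritten interface now pass an explicit cache handle and a level value instead of relying on the single process-wide cache. Below is a minimal sketch of a read through an already initialized handle; only the key_cache_read() signature shown in this patch is assumed (declared, per the file list, in my_sys.h), while the level value and page size are placeholders.

/* Illustrative read through the handle-based interface (not from the patch) */
#include "my_global.h"
#include "my_sys.h"

int read_one_page(KEY_CACHE_HANDLE cache, File file, my_off_t pos,
                  byte *buff, uint page_size)
{
  /* level: weight hint forwarded to the block; 3 is just a placeholder */
  if (!key_cache_read(cache, file, pos, 3, buff, page_size, page_size, 0))
    return 1;                              /* BLOCK_ERROR or pread failure */
  return 0;
}
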
@@ -1262,27 +1694,32 @@ byte *key_cache_read(File file, my_off_t filepos, byte *buff, uint length,
Insert a block of file data from a buffer into key cache
SYNOPSIS
- key_cache_insert()
- file file descriptor
- filepos file offset of the data from the buffer
- buff buffer with data to insert into key cache
- length length of the data in the buffer
+ key_cache_insert()
+ keycache pointer to a key cache data structure
+ file handler for the file to insert data from
+ filepos position of the block of data in the file to insert
+ level determines the weight of the data
+ buff buffer to read data from
+ length length of the data in the buffer
+
RETURN VALUE
- 0 if a success, 1 -otherwise.
+ 0 if a success, 1 - otherwise.
*/
-int key_cache_insert(File file, my_off_t filepos, byte *buff, uint length)
+int key_cache_insert(KEY_CACHE_HANDLE keycache,
+ File file, my_off_t filepos, int level,
+ byte *buff, uint length)
{
DBUG_ENTER("key_cache_insert");
DBUG_PRINT("enter", ("file %u, filepos %lu, length %u",
(uint) file,(ulong) filepos, length));
- if (my_disk_blocks > 0)
+ if (keycache->disk_blocks > 0)
{
/* Key cache is used */
reg1 BLOCK_LINK *block;
- uint offset= (uint) (filepos & (key_cache_block_size-1));
+ uint offset= (uint) (filepos & (keycache->key_cache_block_size-1));
uint read_length;
int page_st;
@@ -1290,17 +1727,18 @@ int key_cache_insert(File file, my_off_t filepos, byte *buff, uint length)
filepos-= offset;
do
{
- read_length= length > key_cache_block_size ?
- key_cache_block_size : length;
+ read_length= length > keycache->key_cache_block_size ?
+ keycache->key_cache_block_size : length;
KEYCACHE_DBUG_ASSERT(read_length > 0);
- keycache_pthread_mutex_lock(&THR_LOCK_keycache);
- my_cache_r_requests++;
- block=find_key_block(file, filepos, 0, &page_st);
+ keycache_pthread_mutex_lock(&keycache->cache_lock);
+ if (keycache->env)
+ keycache->env->cache_r_requests++;
+ block= find_key_block(keycache, file, filepos, level, 0, &page_st);
if (block->status != BLOCK_ERROR && page_st != PAGE_READ)
{
/* The requested page is to be read into the block buffer */
#if !defined(SERIALIZED_READ_FROM_CACHE)
- keycache_pthread_mutex_unlock(&THR_LOCK_keycache);
+ keycache_pthread_mutex_unlock(&keycache->cache_lock);
#endif
/* Copy data from buff */
@@ -1310,7 +1748,7 @@ int key_cache_insert(File file, my_off_t filepos, byte *buff, uint length)
memcpy(block->buffer+offset, buff, (size_t) read_length);
#if !defined(SERIALIZED_READ_FROM_CACHE)
- keycache_pthread_mutex_lock(&THR_LOCK_keycache);
+ keycache_pthread_mutex_lock(&keycache->cache_lock);
#endif
block->status= BLOCK_READ;
block->length= read_length+offset;
@@ -1321,15 +1759,15 @@ int key_cache_insert(File file, my_off_t filepos, byte *buff, uint length)
Link the block into the LRU chain
if it's the last submitted request for the block
*/
- unreg_request(block,1);
+ unreg_request(keycache, block, 1);
- keycache_pthread_mutex_unlock(&THR_LOCK_keycache);
+ keycache_pthread_mutex_unlock(&keycache->cache_lock);
if (block->status & BLOCK_ERROR)
DBUG_RETURN(1);
- buff+=read_length;
- filepos+=read_length;
+ buff+= read_length;
+ filepos+= read_length;
offset=0;
} while ((length-= read_length));
@@ -1339,14 +1777,35 @@ int key_cache_insert(File file, my_off_t filepos, byte *buff, uint length)
/*
- Write a buffer into disk;
- filepos must be a multiple of 'block_length', but it doesn't
- have to be a multiple of key cache block size;
- if !dont_write then all dirty pages involved in writing should
- have been flushed from key cache before the function starts
+ Write a buffer into a cached file.
+
+ SYNOPSIS
+
+ key_cache_write()
+ keycache pointer to a key cache data structure
+ file handler for the file to write data to
+ filepos position in the file to write data to
+ level determines the weight of the data
+ buff buffer with the data
+ length length of the buffer
+      dont_write          if it is 0 then all dirty pages involved in writing
+                          should have been flushed from the key cache
+
+ RETURN VALUE
+ 0 if a success, 1 - otherwise.
+
+ NOTES.
+ The function copies the data of size length from buff into buffers
+ for key cache blocks that are assigned to contain the portion of
+ the file starting with position filepos.
+ It ensures that this data is flushed to the file if dont_write is FALSE.
+ Filepos must be a multiple of 'block_length', but it doesn't
+ have to be a multiple of key_cache_block_size;
*/
-int key_cache_write(File file, my_off_t filepos, byte *buff, uint length,
+int key_cache_write(KEY_CACHE_HANDLE keycache,
+ File file, my_off_t filepos, int level,
+ byte *buff, uint length,
uint block_length __attribute__((unused)),
int dont_write)
{
@@ -1355,83 +1814,87 @@ int key_cache_write(File file, my_off_t filepos, byte *buff, uint length,
DBUG_ENTER("key_cache_write");
DBUG_PRINT("enter", ("file %u, filepos %lu, length %u block_length %u",
- (uint) file,(ulong) filepos,length,block_length));
+ (uint) file, (ulong) filepos, length, block_length));
if (!dont_write)
{
/* Force writing from buff into disk */
- statistic_increment(my_cache_write, &THR_LOCK_keycache);
- if (my_pwrite(file,buff,length,filepos,MYF(MY_NABP | MY_WAIT_IF_FULL)))
+ if (keycache->env)
+ statistic_increment(keycache->env->cache_write,
+ &keycache->cache_lock);
+ if (my_pwrite(file, buff, length, filepos, MYF(MY_NABP | MY_WAIT_IF_FULL)))
DBUG_RETURN(1);
}
#if !defined(DBUG_OFF) && defined(EXTRA_DEBUG)
- DBUG_EXECUTE("check_keycache",test_key_cache("start of key_cache_write",1););
+ DBUG_EXECUTE("check_keycache",
+ test_key_cache(keycache, "start of key_cache_write", 1););
#endif
- if (my_disk_blocks > 0)
+ if (keycache->disk_blocks > 0)
{
/* Key cache is used */
uint read_length;
- uint offset= (uint) (filepos & (key_cache_block_size-1));
+ uint offset= (uint) (filepos & (keycache->key_cache_block_size-1));
int page_st;
/* Write data in key_cache_block_size increments */
filepos-= offset;
do
{
- read_length= length > key_cache_block_size ?
- key_cache_block_size : length;
+ read_length= length > keycache->key_cache_block_size ?
+ keycache->key_cache_block_size : length;
KEYCACHE_DBUG_ASSERT(read_length > 0);
- keycache_pthread_mutex_lock(&THR_LOCK_keycache);
- my_cache_w_requests++;
- block=find_key_block(file, filepos, 1, &page_st);
+ keycache_pthread_mutex_lock(&keycache->cache_lock);
+ if (keycache->env)
+ keycache->env->cache_w_requests++;
+ block= find_key_block(keycache, file, filepos, level, 1, &page_st);
if (block->status != BLOCK_ERROR && page_st != PAGE_READ &&
- (offset || read_length < key_cache_block_size))
- read_block(block,
- offset + read_length >= key_cache_block_size?
- offset : key_cache_block_size,
+ (offset || read_length < keycache->key_cache_block_size))
+ read_block(keycache, block,
+ offset + read_length >= keycache->key_cache_block_size?
+ offset : keycache->key_cache_block_size,
offset,(my_bool)(page_st == PAGE_TO_BE_READ));
if (!dont_write)
{
/* buff has been written to disk at start */
if ((block->status & BLOCK_CHANGED) &&
- (!offset && read_length >= key_cache_block_size))
- link_to_file_list(block, block->hash_link->file, 1);
+ (!offset && read_length >= keycache->key_cache_block_size))
+ link_to_file_list(keycache, block, block->hash_link->file, 1);
}
else if (! (block->status & BLOCK_CHANGED))
- link_to_changed_list(block);
+ link_to_changed_list(keycache, block);
- set_if_smaller(block->offset,offset)
- set_if_bigger(block->length,read_length+offset);
+ set_if_smaller(block->offset, offset)
+ set_if_bigger(block->length, read_length+offset);
if (! (block->status & BLOCK_ERROR))
{
if (!(read_length & 511))
- bmove512(block->buffer+offset,buff,read_length);
+ bmove512(block->buffer+offset, buff, read_length);
else
- memcpy(block->buffer+offset,buff,(size_t) read_length);
+ memcpy(block->buffer+offset, buff, (size_t) read_length);
}
block->status|=BLOCK_READ;
/* Unregister the request */
block->hash_link->requests--;
- unreg_request(block,1);
+ unreg_request(keycache, block, 1);
if (block->status & BLOCK_ERROR)
{
- keycache_pthread_mutex_unlock(&THR_LOCK_keycache);
- error=1;
+ keycache_pthread_mutex_unlock(&keycache->cache_lock);
+ error= 1;
break;
}
- keycache_pthread_mutex_unlock(&THR_LOCK_keycache);
+ keycache_pthread_mutex_unlock(&keycache->cache_lock);
- buff+=read_length;
- filepos+=read_length;
- offset=0;
+ buff+= read_length;
+ filepos+= read_length;
+ offset= 0;
} while ((length-= read_length));
}
@@ -1440,15 +1903,22 @@ int key_cache_write(File file, my_off_t filepos, byte *buff, uint length,
/* Key cache is not used */
if (dont_write)
{
- statistic_increment(my_cache_w_requests, &THR_LOCK_keycache);
- statistic_increment(my_cache_write, &THR_LOCK_keycache);
- if (my_pwrite(file,(byte*) buff,length,filepos,MYF(MY_NABP | MY_WAIT_IF_FULL)))
+ if (keycache->env)
+ {
+ statistic_increment(keycache->env->cache_w_requests,
+ &keycache->cache_lock);
+ statistic_increment(keycache->env->cache_write,
+ &keycache->cache_lock);
+ }
+ if (my_pwrite(file, (byte*) buff, length, filepos,
+ MYF(MY_NABP | MY_WAIT_IF_FULL)))
error=1;
}
}
#if !defined(DBUG_OFF) && defined(EXTRA_DEBUG)
- DBUG_EXECUTE("exec",test_key_cache("end of key_cache_write",1););
+ DBUG_EXECUTE("exec",
+ test_key_cache(keycache, "end of key_cache_write", 1););
#endif
DBUG_RETURN(error);
}
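
A matching sketch for the write path: with a non-zero dont_write argument the page only becomes dirty in the cache and must be flushed explicitly later, while dont_write == 0 also writes the buffer through to the file. Again only the key_cache_write() and flush_key_blocks() signatures from this patch are assumed; everything else is illustrative.

/* Illustrative dirty write followed by an explicit flush (not from the patch) */
#include "my_global.h"
#include "my_sys.h"

int write_one_page(KEY_CACHE_HANDLE cache, File file, my_off_t pos,
                   byte *buff, uint length)
{
  /* dont_write=1: keep the page dirty in the cache, no write-through */
  if (key_cache_write(cache, file, pos, 3, buff, length, length, 1))
    return 1;
  /* later: push all dirty pages of this file to disk, keep them cached */
  return flush_key_blocks(cache, file, FLUSH_KEEP);
}
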
@@ -1460,27 +1930,27 @@ int key_cache_write(File file, my_off_t filepos, byte *buff, uint length,
and add it at the beginning of the LRU chain
*/
-static void free_block(BLOCK_LINK *block)
+static void free_block(KEY_CACHE *keycache, BLOCK_LINK *block)
{
KEYCACHE_THREAD_TRACE("free block");
KEYCACHE_DBUG_PRINT("free_block",
("block %u to be freed",BLOCK_NUMBER(block)));
if (block->hash_link)
{
- block->status|=BLOCK_REASSIGNED;
- wait_for_readers(block);
- unlink_hash(block->hash_link);
+ block->status|= BLOCK_REASSIGNED;
+ wait_for_readers(keycache, block);
+ unlink_hash(keycache, block->hash_link);
}
unlink_changed(block);
- block->status=0;
- block->length=0;
- block->offset=key_cache_block_size;
+ block->status= 0;
+ block->length= 0;
+ block->offset= keycache->key_cache_block_size;
KEYCACHE_THREAD_TRACE("free block");
KEYCACHE_DBUG_PRINT("free_block",
("block is freed"));
- unreg_request(block,0);
- block->hash_link=NULL;
+ unreg_request(keycache, block, 0);
+ block->hash_link= NULL;
}
@@ -1496,51 +1966,56 @@ static int cmp_sec_link(BLOCK_LINK **a, BLOCK_LINK **b)
free used blocks if requested
*/
-static int flush_cached_blocks(File file, BLOCK_LINK **cache,
+static int flush_cached_blocks(KEY_CACHE *keycache,
+ File file, BLOCK_LINK **cache,
BLOCK_LINK **end,
enum flush_type type)
{
int error;
- int last_errno=0;
- uint count=end-cache;
+ int last_errno= 0;
+ uint count= end-cache;
/* Don't lock the cache during the flush */
- keycache_pthread_mutex_unlock(&THR_LOCK_keycache);
+ keycache_pthread_mutex_unlock(&keycache->cache_lock);
/*
As all blocks referred in 'cache' are marked by BLOCK_IN_FLUSH
we are guarunteed no thread will change them
*/
- qsort((byte*) cache,count,sizeof(*cache),(qsort_cmp) cmp_sec_link);
+ qsort((byte*) cache, count, sizeof(*cache), (qsort_cmp) cmp_sec_link);
- keycache_pthread_mutex_lock(&THR_LOCK_keycache);
+ keycache_pthread_mutex_lock(&keycache->cache_lock);
for ( ; cache != end ; cache++)
{
BLOCK_LINK *block= *cache;
KEYCACHE_DBUG_PRINT("flush_cached_blocks",
("block %u to be flushed", BLOCK_NUMBER(block)));
- keycache_pthread_mutex_unlock(&THR_LOCK_keycache);
- error=my_pwrite(file,block->buffer+block->offset,block->length,
- block->hash_link->diskpos,MYF(MY_NABP | MY_WAIT_IF_FULL));
- keycache_pthread_mutex_lock(&THR_LOCK_keycache);
- my_cache_write++;
+ keycache_pthread_mutex_unlock(&keycache->cache_lock);
+ error= my_pwrite(file, block->buffer+block->offset, block->length,
+ block->hash_link->diskpos,
+ MYF(MY_NABP | MY_WAIT_IF_FULL));
+ keycache_pthread_mutex_lock(&keycache->cache_lock);
+ if (keycache->env)
+ keycache->env->cache_write++;
if (error)
{
block->status|= BLOCK_ERROR;
if (!last_errno)
- last_errno=errno ? errno : -1;
+ last_errno= errno ? errno : -1;
}
/* type will never be FLUSH_IGNORE_CHANGED here */
if (! (type == FLUSH_KEEP || type == FLUSH_FORCE_WRITE))
{
- my_blocks_changed--;
- free_block(block);
+ keycache->blocks_changed--;
+ if (keycache->env)
+ keycache->env->blocks_changed--;
+ free_block(keycache, block);
}
else
{
- block->status&=~BLOCK_IN_FLUSH;
- link_to_file_list(block,file,1);
- unreg_request(block,1);
+ block->status&= ~BLOCK_IN_FLUSH;
+ link_to_file_list(keycache, block, file, 1);
+ unreg_request(keycache, block, 1);
}
}
@@ -1550,31 +2025,45 @@ static int flush_cached_blocks(File file, BLOCK_LINK **cache,
/*
Flush all blocks for a file to disk
-*/
-int flush_key_blocks(File file, enum flush_type type)
+ SYNOPSIS
+
+ flush_key_blocks()
+ keycache pointer to a key cache data structure
+ file handler for the file to flush to
+ flush_type type of the flush
+
+ RETURN VALUE
+ 0 if a success, 1 - otherwise.
+ */
+
+int flush_key_blocks(KEY_CACHE_HANDLE keycache,
+ File file, enum flush_type type)
{
- int last_errno=0;
+ KEY_CACHE_VAR *env;
BLOCK_LINK *cache_buff[FLUSH_CACHE],**cache;
+ int last_errno= 0;
+
DBUG_ENTER("flush_key_blocks");
DBUG_PRINT("enter",("file: %d blocks_used: %d blocks_changed: %d",
- file, my_blocks_used, my_blocks_changed));
+ file, keycache->blocks_used, keycache->blocks_changed));
#if !defined(DBUG_OFF) && defined(EXTRA_DEBUG)
- DBUG_EXECUTE("check_keycache",test_key_cache("start of flush_key_blocks",0););
+ DBUG_EXECUTE("check_keycache",
+ test_key_cache(keycache, "start of flush_key_blocks", 0););
#endif
- keycache_pthread_mutex_lock(&THR_LOCK_keycache);
+ keycache_pthread_mutex_lock(&keycache->cache_lock);
- cache=cache_buff;
- if (my_disk_blocks > 0 &&
+ cache= cache_buff;
+ if (keycache->disk_blocks > 0 &&
(!my_disable_flush_key_blocks || type != FLUSH_KEEP))
{
/* Key cache exists and flush is not disabled */
- int error=0;
- uint count=0;
+ int error= 0;
+ uint count= 0;
BLOCK_LINK **pos,**end;
- BLOCK_LINK *first_in_switch=NULL;
+ BLOCK_LINK *first_in_switch= NULL;
BLOCK_LINK *block, *next;
#if defined(KEYCACHE_DEBUG)
uint cnt=0;
@@ -1586,37 +2075,38 @@ int flush_key_blocks(File file, enum flush_type type)
Count how many key blocks we have to cache to be able
to flush all dirty pages with minimum seek moves
*/
- for (block=changed_blocks[FILE_HASH(file)] ;
+ for (block= keycache->changed_blocks[FILE_HASH(file)] ;
block ;
- block=block->next_changed)
+ block= block->next_changed)
{
if (block->hash_link->file == file)
{
count++;
- KEYCACHE_DBUG_ASSERT(count<= my_blocks_used);
+ KEYCACHE_DBUG_ASSERT(count<= keycache->blocks_used);
}
}
/* Allocate a new buffer only if its bigger than the one we have */
if (count > FLUSH_CACHE &&
- !(cache=(BLOCK_LINK**) my_malloc(sizeof(BLOCK_LINK*)*count,MYF(0))))
+ !(cache= (BLOCK_LINK**) my_malloc(sizeof(BLOCK_LINK*)*count,
+ MYF(0))))
{
- cache=cache_buff;
- count=FLUSH_CACHE;
+ cache= cache_buff;
+ count= FLUSH_CACHE;
}
}
/* Retrieve the blocks and write them to a buffer to be flushed */
restart:
- end=(pos=cache)+count;
- for (block=changed_blocks[FILE_HASH(file)] ;
+ end= (pos= cache)+count;
+ for (block= keycache->changed_blocks[FILE_HASH(file)] ;
block ;
- block=next)
+ block= next)
{
#if defined(KEYCACHE_DEBUG)
cnt++;
- KEYCACHE_DBUG_ASSERT(cnt <= my_blocks_used);
+ KEYCACHE_DBUG_ASSERT(cnt <= keycache->blocks_used);
#endif
- next=block->next_changed;
+ next= block->next_changed;
if (block->hash_link->file == file)
{
/*
@@ -1632,7 +2122,7 @@ restart:
We care only for the blocks for which flushing was not
initiated by other threads as a result of page swapping
*/
- reg_requests(block,1);
+ reg_requests(keycache, block, 1);
if (type != FLUSH_IGNORE_CHANGED)
{
/* It's not a temporary file */
@@ -1642,7 +2132,8 @@ restart:
This happens only if there is not enough
memory for the big block
*/
- if ((error=flush_cached_blocks(file,cache,end,type)))
+ if ((error= flush_cached_blocks(keycache, file, cache,
+ end,type)))
last_errno=error;
/*
Restart the scan as some other thread might have changed
@@ -1651,47 +2142,50 @@ restart:
*/
goto restart;
}
- *pos++=block;
+ *pos++= block;
}
else
{
/* It's a temporary file */
- my_blocks_changed--;
- free_block(block);
+ keycache->blocks_changed--;
+ if (keycache->env)
+ keycache->env->blocks_changed--;
+ free_block(keycache, block);
}
}
else
{
/* Link the block into a list of blocks 'in switch' */
unlink_changed(block);
- link_changed(block,&first_in_switch);
+ link_changed(block, &first_in_switch);
}
}
}
if (pos != cache)
{
- if ((error=flush_cached_blocks(file,cache,pos,type)))
- last_errno=error;
+ if ((error= flush_cached_blocks(keycache, file, cache, pos, type)))
+ last_errno= error;
}
/* Wait until list of blocks in switch is empty */
while (first_in_switch)
{
#if defined(KEYCACHE_DEBUG)
- cnt=0;
+ cnt= 0;
#endif
- block=first_in_switch;
+ block= first_in_switch;
{
- struct st_my_thread_var *thread=my_thread_var;
+ struct st_my_thread_var *thread= my_thread_var;
add_to_queue(&block->wqueue[COND_FOR_SAVED], thread);
do
{
- keycache_pthread_cond_wait(&thread->suspend,&THR_LOCK_keycache);
+ keycache_pthread_cond_wait(&thread->suspend,
+ &keycache->cache_lock);
}
while (thread->next);
}
#if defined(KEYCACHE_DEBUG)
cnt++;
- KEYCACHE_DBUG_ASSERT(cnt <= my_blocks_used);
+ KEYCACHE_DBUG_ASSERT(cnt <= keycache->blocks_used);
#endif
}
/* The following happens very seldom */
@@ -1700,34 +2194,39 @@ restart:
#if defined(KEYCACHE_DEBUG)
cnt=0;
#endif
- for (block=file_blocks[FILE_HASH(file)] ;
+ for (block= keycache->file_blocks[FILE_HASH(file)] ;
block ;
- block=next)
+ block= next)
{
#if defined(KEYCACHE_DEBUG)
cnt++;
- KEYCACHE_DBUG_ASSERT(cnt <= my_blocks_used);
+ KEYCACHE_DBUG_ASSERT(cnt <= keycache->blocks_used);
#endif
- next=block->next_changed;
+ next= block->next_changed;
if (block->hash_link->file == file &&
(! (block->status & BLOCK_CHANGED)
|| type == FLUSH_IGNORE_CHANGED))
{
- reg_requests(block,1);
- free_block(block);
+ reg_requests(keycache, block, 1);
+ free_block(keycache, block);
}
}
}
}
- keycache_pthread_mutex_unlock(&THR_LOCK_keycache);
+ keycache_pthread_mutex_unlock(&keycache->cache_lock);
+
+ if (type == FLUSH_REMOVE && (env= keycache->env) && (env->action))
+ {
+ (*env->action)((void *) env);
+ }
#ifndef DBUG_OFF
- DBUG_EXECUTE("check_keycache",
- test_key_cache("end of flush_key_blocks",0););
+ DBUG_EXECUTE("check_keycache",
+ test_key_cache(keycache, "end of flush_key_blocks", 0););
#endif
if (cache != cache_buff)
- my_free((gptr) cache,MYF(0));
+ my_free((gptr) cache, MYF(0));
if (last_errno)
errno=last_errno; /* Return first error */
DBUG_RETURN(last_errno != 0);
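
The new FLUSH_REMOVE branch above gives a key cache owner a hook: after the file's blocks have been flushed and freed, the env->action callback is invoked with the KEY_CACHE_VAR itself. A small sketch of how such a hook might be wired up; the exact declared type of the action member is an assumption here, only its call as (*env->action)((void *) env) is taken from the patch.

/* Illustrative wiring of the env->action hook (callback type is assumed) */
#include "my_global.h"
#include "my_sys.h"

static void notify_removed(void *arg)
{
  KEY_CACHE_VAR *env= (KEY_CACHE_VAR*) arg;
  (void) env;                 /* e.g. wake a thread waiting on reassignment */
}

int drop_file_blocks(KEY_CACHE_VAR *env, File file)
{
  env->action= notify_removed;
  /* flushes and frees the file's blocks, then calls env->action(env) */
  return flush_key_blocks(env->cache, file, FLUSH_REMOVE);
}
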
@@ -1738,27 +2237,27 @@ restart:
Flush all blocks in the key cache to disk
*/
-static int flush_all_key_blocks()
+static int flush_all_key_blocks(KEY_CACHE *keycache)
{
#if defined(KEYCACHE_DEBUG)
uint cnt=0;
#endif
- while (my_blocks_changed > 0)
+ while (keycache->blocks_changed > 0)
{
BLOCK_LINK *block;
- for (block= my_used_last->next_used ; ; block=block->next_used)
+ for (block= keycache->used_last->next_used ; ; block=block->next_used)
{
if (block->hash_link)
{
#if defined(KEYCACHE_DEBUG)
cnt++;
- KEYCACHE_DBUG_ASSERT(cnt <= my_blocks_used);
+ KEYCACHE_DBUG_ASSERT(cnt <= keycache->blocks_used);
#endif
- if (flush_key_blocks(block->hash_link->file, FLUSH_RELEASE))
+ if (flush_key_blocks(keycache, block->hash_link->file, FLUSH_RELEASE))
return 1;
break;
}
- if (block == my_used_last)
+ if (block == keycache->used_last)
break;
}
}
@@ -1770,7 +2269,8 @@ static int flush_all_key_blocks()
/*
Test if disk-cache is ok
*/
-static void test_key_cache(const char *where __attribute__((unused)),
+static void test_key_cache(KEY_CACHE *keycache __attribute__((unused)),
+ const char *where __attribute__((unused)),
my_bool lock __attribute__((unused)))
{
/* TODO */
@@ -1783,10 +2283,10 @@ static void test_key_cache(const char *where __attribute__((unused)),
#define MAX_QUEUE_LEN 100
-static void keycache_dump()
+static void keycache_dump(KEY_CACHE *keycache)
{
FILE *keycache_dump_file=fopen(KEYCACHE_DUMP_FILE, "w");
- struct st_my_thread_var *thread_var =my_thread_var;
+ struct st_my_thread_var *thread_var= my_thread_var;
struct st_my_thread_var *last;
struct st_my_thread_var *thread;
BLOCK_LINK *block;
@@ -1829,10 +2329,10 @@ static void keycache_dump()
}
while (thread != last);
- for (i=0 ; i< my_blocks_used ; i++)
+ for (i=0 ; i< keycache->blocks_used ; i++)
{
int j;
- block= &my_block_root[i];
+ block= &keycache->block_root[i];
hash_link= block->hash_link;
fprintf(keycache_dump_file,
"block:%u hash_link:%d status:%x #requests=%u waiting_for_readers:%d\n",
@@ -1841,7 +2341,7 @@ static void keycache_dump()
for (j=0 ; j < 2; j++)
{
KEYCACHE_WQUEUE *wqueue=&block->wqueue[j];
- thread=last=wqueue->last_thread;
+ thread= last= wqueue->last_thread;
fprintf(keycache_dump_file, "queue #%d\n", j);
if (thread)
{
@@ -1858,16 +2358,16 @@ static void keycache_dump()
}
}
fprintf(keycache_dump_file, "LRU chain:");
- block= my_used_last;
+  block= keycache->used_last;
if (block)
{
do
{
- block=block->next_used;
+ block= block->next_used;
fprintf(keycache_dump_file,
"block:%u, ", BLOCK_NUMBER(block));
}
- while (block != my_used_last);
+ while (block != keycache->used_last);
}
fprintf(keycache_dump_file, "\n");
@@ -1893,8 +2393,8 @@ static int keycache_pthread_cond_wait(pthread_cond_t *cond,
/* Get current time */
gettimeofday(&now, &tz);
/* Prepare timeout value */
- timeout.tv_sec = now.tv_sec + KEYCACHE_TIMEOUT;
- timeout.tv_nsec = now.tv_usec * 1000; /* timeval uses microseconds. */
+ timeout.tv_sec= now.tv_sec + KEYCACHE_TIMEOUT;
+ timeout.tv_nsec= now.tv_usec * 1000; /* timeval uses microseconds. */
/* timespec uses nanoseconds. */
/* 1 nanosecond = 1000 micro seconds. */
KEYCACHE_THREAD_TRACE_END("started waiting");
@@ -1904,7 +2404,7 @@ static int keycache_pthread_cond_wait(pthread_cond_t *cond,
fprintf(keycache_debug_log, "waiting...\n");
fflush(keycache_debug_log);
#endif
- rc = pthread_cond_timedwait(cond, mutex, &timeout);
+ rc= pthread_cond_timedwait(cond, mutex, &timeout);
KEYCACHE_THREAD_TRACE_BEGIN("finished waiting");
#if defined(KEYCACHE_DEBUG)
if (rc == ETIMEDOUT)
@@ -1932,7 +2432,7 @@ static int keycache_pthread_cond_wait(pthread_cond_t *cond,
{
int rc;
KEYCACHE_THREAD_TRACE_END("started waiting");
- rc = pthread_cond_wait(cond, mutex);
+ rc= pthread_cond_wait(cond, mutex);
KEYCACHE_THREAD_TRACE_BEGIN("finished waiting");
return rc;
}
@@ -1945,7 +2445,7 @@ static int keycache_pthread_cond_wait(pthread_cond_t *cond,
static int keycache_pthread_mutex_lock(pthread_mutex_t *mutex)
{
int rc;
- rc=pthread_mutex_lock(mutex);
+ rc= pthread_mutex_lock(mutex);
KEYCACHE_THREAD_TRACE_BEGIN("");
return rc;
}
@@ -1962,7 +2462,7 @@ static int keycache_pthread_cond_signal(pthread_cond_t *cond)
{
int rc;
KEYCACHE_THREAD_TRACE("signal");
- rc=pthread_cond_signal(cond);
+ rc= pthread_cond_signal(cond);
return rc;
}
@@ -1971,7 +2471,7 @@ static int keycache_pthread_cond_broadcast(pthread_cond_t *cond)
{
int rc;
KEYCACHE_THREAD_TRACE("signal");
- rc=pthread_cond_broadcast(cond);
+ rc= pthread_cond_broadcast(cond);
return rc;
}
diff --git a/sql/ha_myisam.cc b/sql/ha_myisam.cc
index 864ee55a85f..aa889b649d0 100644
--- a/sql/ha_myisam.cc
+++ b/sql/ha_myisam.cc
@@ -228,6 +228,12 @@ int ha_myisam::open(const char *name, int mode, uint test_if_locked)
{
if (!(file=mi_open(name, mode, test_if_locked)))
return (my_errno ? my_errno : -1);
+
+ /* Synchronize key cache assignment of the handler */
+ KEY_CACHE_VAR *key_cache= table->key_cache ? table->key_cache :
+ &dflt_key_cache_var;
+ VOID(mi_extra(file, HA_EXTRA_SET_KEY_CACHE,
+ (void*) &key_cache->cache));
if (test_if_locked & (HA_OPEN_IGNORE_IF_LOCKED | HA_OPEN_TMP_TABLE))
VOID(mi_extra(file, HA_EXTRA_NO_WAIT_LOCK, 0));
@@ -692,6 +698,131 @@ int ha_myisam::repair(THD *thd, MI_CHECK &param, bool optimize)
/*
+ Assign table indexes to a key cache.
+*/
+
+int ha_myisam::assign_to_keycache(THD* thd, HA_CHECK_OPT *check_opt)
+{
+ uint len;
+ KEY_CACHE_VAR *old_key_cache;
+ KEY_CACHE_VAR *new_key_cache;
+ const char *errmsg=0;
+ int error= HA_ADMIN_OK;
+ ulonglong map= ~(ulonglong) 0;
+ TABLE_LIST *table_list= table->pos_in_table_list;
+ const char *new_key_cache_name= table_list->option ?
+ (const char *) table_list->option :
+ DEFAULT_KEY_CACHE_NAME;
+ KEY_CACHE_ASMT *key_cache_asmt= table->key_cache_asmt;
+ bool triggered= key_cache_asmt->triggered;
+
+ DBUG_ENTER("ha_myisam::assign_to_keycache");
+
+ VOID(pthread_mutex_lock(&LOCK_assign));
+
+ old_key_cache= key_cache_asmt->key_cache;
+
+ /* Check validity of the index references */
+ if (!triggered && table_list->use_index)
+ {
+ key_map kmap= get_key_map_from_key_list(table, table_list->use_index);
+ if (kmap == ~(key_map) 0)
+ {
+ errmsg= thd->net.last_error;
+ error= HA_ADMIN_FAILED;
+ goto err;
+ }
+ if (kmap)
+ map= kmap;
+ }
+
+ len= strlen(new_key_cache_name);
+ new_key_cache= get_or_create_key_cache(new_key_cache_name, len);
+ if (old_key_cache == new_key_cache)
+ {
+ /* Nothing to do: table is assigned to the same key cache */
+ goto ok;
+ }
+
+ if (!new_key_cache ||
+ (!new_key_cache->cache && ha_key_cache(new_key_cache)))
+ {
+ if (key_cache_asmt->triggered)
+ error= HA_ERR_OUT_OF_MEM;
+ else
+ {
+ char buf[ERRMSGSIZE];
+ my_snprintf(buf, ERRMSGSIZE,
+ "Failed to create key cache %s", new_key_cache_name);
+ errmsg= buf;
+ error= HA_ADMIN_FAILED;
+ }
+ goto err;
+ }
+
+ reassign_key_cache(key_cache_asmt, new_key_cache);
+
+ VOID(pthread_mutex_unlock(&LOCK_assign));
+ error= mi_assign_to_keycache(file, map, new_key_cache, &LOCK_assign);
+ VOID(pthread_mutex_lock(&LOCK_assign));
+
+ if (error && !key_cache_asmt->triggered)
+ {
+ switch (error) {
+ default:
+ char buf[ERRMSGSIZE+20];
+ my_snprintf(buf, ERRMSGSIZE,
+ "Failed to flush to index file (errno: %d)", my_errno);
+ errmsg= buf;
+ }
+ error= HA_ADMIN_CORRUPT;
+ goto err;
+ }
+
+ goto ok;
+
+ err:
+ if (!triggered)
+ {
+ MI_CHECK param;
+ myisamchk_init(&param);
+ param.thd= thd;
+ param.op_name= (char*)"assign_to_keycache";
+ param.db_name= table->table_cache_key;
+ param.table_name= table->table_name;
+ param.testflag= 0;
+ mi_check_print_error(&param, errmsg);
+ }
+
+ ok:
+ if (--key_cache_asmt->requests)
+ {
+ /* There is a queue of assignments for the table */
+
+ /* Remove the first member from the queue */
+ struct st_my_thread_var *last= key_cache_asmt->queue;
+ struct st_my_thread_var *thread= last->next;
+ if (thread->next == thread)
+ key_cache_asmt->queue= 0;
+ else
+ {
+ last->next= thread->next;
+ last->next->prev= &last->next;
+ thread->next= 0;
+ }
+ /* Signal the first waiting thread to proceed */
+ VOID(pthread_cond_signal(&thread->suspend));
+ }
+
+ key_cache_asmt->triggered= 0;
+
+ VOID(pthread_mutex_unlock(&LOCK_assign));
+
+ DBUG_RETURN(error);
+}
+
+
+/*
Preload pages of the index file for a table into the key cache.
*/
diff --git a/sql/ha_myisam.h b/sql/ha_myisam.h
index e4e3192af10..e8413ef0bd2 100644
--- a/sql/ha_myisam.h
+++ b/sql/ha_myisam.h
@@ -128,6 +128,7 @@ class ha_myisam: public handler
int optimize(THD* thd, HA_CHECK_OPT* check_opt);
int restore(THD* thd, HA_CHECK_OPT* check_opt);
int backup(THD* thd, HA_CHECK_OPT* check_opt);
+ int assign_to_keycache(THD* thd, HA_CHECK_OPT* check_opt);
int preload_keys(THD* thd, HA_CHECK_OPT* check_opt);
#ifdef HAVE_REPLICATION
int dump(THD* thd, int fd);
diff --git a/sql/ha_myisammrg.cc b/sql/ha_myisammrg.cc
index a0449e83222..d6d4f58b31c 100644
--- a/sql/ha_myisammrg.cc
+++ b/sql/ha_myisammrg.cc
@@ -45,6 +45,12 @@ int ha_myisammrg::open(const char *name, int mode, uint test_if_locked)
DBUG_PRINT("info", ("ha_myisammrg::open exit %d", my_errno));
return (my_errno ? my_errno : -1);
}
+ /* Synchronize key cache assignment for the file */
+ KEY_CACHE_VAR *key_cache= table->key_cache ? table->key_cache :
+ &dflt_key_cache_var;
+ VOID(myrg_extra(file, HA_EXTRA_SET_KEY_CACHE,
+ (void*) &key_cache->cache));
+
DBUG_PRINT("info", ("ha_myisammrg::open myrg_extrafunc..."))
myrg_extrafunc(file, query_cache_invalidate_by_MyISAM_filename_ref);
if (!(test_if_locked == HA_OPEN_WAIT_IF_LOCKED ||
diff --git a/sql/handler.cc b/sql/handler.cc
index a8e9f9cf50a..28f399818c4 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -736,6 +736,11 @@ int handler::analyze(THD* thd, HA_CHECK_OPT* check_opt)
return HA_ADMIN_NOT_IMPLEMENTED;
}
+int handler::assign_to_keycache(THD* thd, HA_CHECK_OPT* check_opt)
+{
+ return HA_ADMIN_NOT_IMPLEMENTED;
+}
+
int handler::preload_keys(THD* thd, HA_CHECK_OPT* check_opt)
{
return HA_ADMIN_NOT_IMPLEMENTED;
@@ -1102,27 +1107,62 @@ int ha_create_table(const char *name, HA_CREATE_INFO *create_info,
/* Use key cacheing on all databases */
-void ha_key_cache(void)
+int ha_key_cache(KEY_CACHE_VAR *key_cache)
{
- /*
- The following mutex is not really needed as long as keybuff_size is
- treated as a long value, but we use the mutex here to guard for future
- changes.
- */
- pthread_mutex_lock(&LOCK_global_system_variables);
- long tmp= (long) keybuff_size;
- pthread_mutex_unlock(&LOCK_global_system_variables);
- if (tmp)
- (void) init_key_cache(tmp);
+ if (!key_cache->cache)
+ {
+ /*
+ The following mutex is not really needed as long as keybuff_size is
+ treated as a long value, but we use the mutex here to guard for future
+ changes.
+ */
+ pthread_mutex_lock(&LOCK_global_system_variables);
+ if (!key_cache->block_size)
+ key_cache->block_size= dflt_key_cache_block_size;
+ if (!key_cache->buff_size)
+ key_cache->buff_size= dflt_key_buff_size;
+ long tmp_buff_size= (long) key_cache->buff_size;
+ long tmp_block_size= (long) key_cache->block_size;
+ pthread_mutex_unlock(&LOCK_global_system_variables);
+ return !init_key_cache(&key_cache->cache,
+ tmp_block_size,
+ tmp_buff_size,
+ key_cache);
+ }
+ return 0;
}
+int ha_resize_key_cache(KEY_CACHE_VAR *key_cache)
+{
+ if (key_cache->cache)
+ {
+ pthread_mutex_lock(&LOCK_global_system_variables);
+ long tmp_buff_size= (long) key_cache->buff_size;
+ long tmp_block_size= (long) key_cache->block_size;
+ pthread_mutex_unlock(&LOCK_global_system_variables);
+ return !resize_key_cache(&key_cache->cache, tmp_block_size,
+ tmp_buff_size);
+ }
+ return 0;
+}
+
+int ha_change_key_cache_param(KEY_CACHE_VAR *key_cache)
+{
+ if (key_cache->cache)
+ {
+ change_key_cache_param(key_cache->cache);
+ }
+ return 0;
+}
-void ha_resize_key_cache(void)
+int ha_end_key_cache(KEY_CACHE_VAR *key_cache)
{
- pthread_mutex_lock(&LOCK_global_system_variables);
- long tmp= (long) keybuff_size;
- pthread_mutex_unlock(&LOCK_global_system_variables);
- (void) resize_key_cache(tmp);
+ if (key_cache->cache)
+ {
+ end_key_cache(&key_cache->cache, 1);
+ return key_cache->cache ? 1 : 0;
+ }
+ return 0;
}
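
Taken together, ha_key_cache(), ha_resize_key_cache() and ha_end_key_cache() give each KEY_CACHE_VAR its own create/resize/destroy lifecycle. Below is a compressed sketch of that lifecycle, using only the fields visible in this patch (block_size, buff_size, cache) and illustrative sizes; in the server these calls are of course driven by the system variables rather than hard-coded values.

/* Illustrative lifecycle of a private key cache (not from the patch) */
#include "my_global.h"
#include "my_sys.h"
#include "handler.h"

int use_private_cache(void)
{
  KEY_CACHE_VAR kc;
  bzero((char*) &kc, sizeof(kc));
  kc.block_size= 2048;                     /* illustrative sizes */
  kc.buff_size= 8*1024*1024L;
  if (ha_key_cache(&kc))                   /* creates kc.cache */
    return 1;
  kc.buff_size= 16*1024*1024L;             /* user enlarged the cache */
  if (ha_resize_key_cache(&kc))
    return 1;
  return ha_end_key_cache(&kc);            /* frees the cache buffer */
}
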
diff --git a/sql/handler.h b/sql/handler.h
index ad209e5cec9..8f0f2ef4e55 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -307,6 +307,7 @@ public:
virtual bool check_and_repair(THD *thd) {return 1;}
virtual int optimize(THD* thd,HA_CHECK_OPT* check_opt);
virtual int analyze(THD* thd, HA_CHECK_OPT* check_opt);
+ virtual int assign_to_keycache(THD* thd, HA_CHECK_OPT* check_opt);
virtual int preload_keys(THD* thd, HA_CHECK_OPT* check_opt);
virtual int backup(THD* thd, HA_CHECK_OPT* check_opt);
/*
@@ -390,8 +391,10 @@ int ha_create_table(const char *name, HA_CREATE_INFO *create_info,
bool update_create_info);
int ha_delete_table(enum db_type db_type, const char *path);
void ha_drop_database(char* path);
-void ha_key_cache(void);
-void ha_resize_key_cache(void);
+int ha_key_cache(KEY_CACHE_VAR *key_cache);
+int ha_resize_key_cache(KEY_CACHE_VAR *key_cache);
+int ha_change_key_cache_param(KEY_CACHE_VAR *key_cache);
+int ha_end_key_cache(KEY_CACHE_VAR *key_cache);
int ha_start_stmt(THD *thd);
int ha_report_binlog_offset_and_commit(THD *thd, char *log_file_name,
my_off_t end_offset);
diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h
index 42b73c48606..0522c1bcde2 100644
--- a/sql/item_cmpfunc.h
+++ b/sql/item_cmpfunc.h
@@ -245,7 +245,7 @@ public:
longlong val_int();
enum Functype functype() const { return NE_FUNC; }
cond_result eq_cmp_result() const { return COND_FALSE; }
- optimize_type select_optimize() const { return OPTIMIZE_NONE; }
+ optimize_type select_optimize() const { return OPTIMIZE_KEY; }
const char *func_name() const { return "<>"; }
};
diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h
index 1926314cef1..ce5c9ff0924 100644
--- a/sql/mysql_priv.h
+++ b/sql/mysql_priv.h
@@ -419,6 +419,10 @@ bool check_stack_overrun(THD *thd,char *dummy);
void table_cache_init(void);
void table_cache_free(void);
uint cached_tables(void);
+void assign_cache_init(void);
+void assign_cache_free(void);
+void reassign_key_cache(KEY_CACHE_ASMT *key_cache_asmt,
+ KEY_CACHE_VAR *new_key_cache);
void kill_mysql(void);
void close_connection(THD *thd, uint errcode, bool lock);
bool reload_acl_and_cache(THD *thd, ulong options, TABLE_LIST *tables,
@@ -448,7 +452,10 @@ int mysql_analyze_table(THD* thd, TABLE_LIST* table_list,
HA_CHECK_OPT* check_opt);
int mysql_optimize_table(THD* thd, TABLE_LIST* table_list,
HA_CHECK_OPT* check_opt);
+int mysql_assign_to_keycache(THD* thd, TABLE_LIST* table_list);
int mysql_preload_keys(THD* thd, TABLE_LIST* table_list);
+int reassign_keycache_tables(THD* thd, KEY_CACHE_VAR* src_cache,
+ char *dest_name, bool remove_fl);
bool check_simple_select();
@@ -821,7 +828,7 @@ extern pthread_mutex_t LOCK_mysql_create_db,LOCK_Acl,LOCK_open,
LOCK_error_log, LOCK_delayed_insert,
LOCK_delayed_status, LOCK_delayed_create, LOCK_crypt, LOCK_timezone,
LOCK_slave_list, LOCK_active_mi, LOCK_manager,
- LOCK_global_system_variables, LOCK_user_conn;
+ LOCK_global_system_variables, LOCK_user_conn, LOCK_assign;
extern rw_lock_t LOCK_grant;
extern pthread_cond_t COND_refresh, COND_thread_count, COND_manager;
extern pthread_attr_t connection_attrib;
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index da7d3df0251..8b79431b823 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -307,8 +307,6 @@ ulong rpl_recovery_rank=0;
ulong my_bind_addr; /* the address we bind to */
volatile ulong cached_thread_count= 0;
-ulonglong keybuff_size;
-
double log_10[32]; /* 10 potences */
time_t start_time;
@@ -371,6 +369,7 @@ pthread_mutex_t LOCK_mysql_create_db, LOCK_Acl, LOCK_open, LOCK_thread_count,
LOCK_delayed_insert, LOCK_delayed_status, LOCK_delayed_create,
LOCK_crypt, LOCK_bytes_sent, LOCK_bytes_received,
LOCK_global_system_variables,
+ LOCK_assign,
LOCK_user_conn, LOCK_slave_list, LOCK_active_mi;
rw_lock_t LOCK_grant;
pthread_cond_t COND_refresh,COND_thread_count, COND_slave_stopped,
@@ -897,6 +896,7 @@ void clean_up(bool print_message)
#endif
query_cache_destroy();
table_cache_free();
+ assign_cache_free();
hostname_cache_free();
item_user_lock_free();
lex_free(); /* Free some memory */
@@ -906,7 +906,8 @@ void clean_up(bool print_message)
udf_free();
#endif
(void) ha_panic(HA_PANIC_CLOSE); /* close all tables and logs */
- end_key_cache();
+ process_key_caches(&ha_end_key_cache);
+ ha_end_key_cache(&dflt_key_cache_var);
delete_elements(&key_caches, free_key_cache);
end_thr_alarm(1); /* Free allocated memory */
#ifdef USE_RAID
@@ -989,6 +990,7 @@ static void clean_up_mutexes()
#endif
(void) pthread_mutex_destroy(&LOCK_active_mi);
(void) pthread_mutex_destroy(&LOCK_global_system_variables);
+ (void) pthread_mutex_destroy(&LOCK_assign);
(void) pthread_cond_destroy(&COND_thread_count);
(void) pthread_cond_destroy(&COND_refresh);
(void) pthread_cond_destroy(&COND_thread_cache);
@@ -1558,14 +1560,15 @@ or misconfigured. This error can also be caused by malfunctioning hardware.\n",
We will try our best to scrape up some info that will hopefully help diagnose\n\
the problem, but since we have already crashed, something is definitely wrong\n\
and this may fail.\n\n");
- fprintf(stderr, "key_buffer_size=%lu\n", (ulong) keybuff_size);
+ fprintf(stderr, "key_buffer_size=%lu\n",
+ (ulong) dflt_key_cache_var.buff_size);
fprintf(stderr, "read_buffer_size=%ld\n", global_system_variables.read_buff_size);
fprintf(stderr, "max_used_connections=%ld\n", max_used_connections);
fprintf(stderr, "max_connections=%ld\n", max_connections);
fprintf(stderr, "threads_connected=%d\n", thread_count);
fprintf(stderr, "It is possible that mysqld could use up to \n\
key_buffer_size + (read_buffer_size + sort_buffer_size)*max_connections = %ld K\n\
-bytes of memory\n", ((ulong) keybuff_size +
+bytes of memory\n", ((ulong) dflt_key_cache_var.buff_size +
(global_system_variables.read_buff_size +
global_system_variables.sortbuff_size) *
max_connections)/ 1024);
@@ -2186,6 +2189,7 @@ static int init_thread_environment()
(void) pthread_mutex_init(&LOCK_user_conn, MY_MUTEX_INIT_FAST);
(void) pthread_mutex_init(&LOCK_active_mi, MY_MUTEX_INIT_FAST);
(void) pthread_mutex_init(&LOCK_global_system_variables, MY_MUTEX_INIT_FAST);
+ (void) pthread_mutex_init(&LOCK_assign, MY_MUTEX_INIT_FAST);
(void) my_rwlock_init(&LOCK_grant, NULL);
(void) pthread_cond_init(&COND_thread_count,NULL);
(void) pthread_cond_init(&COND_refresh,NULL);
@@ -2237,6 +2241,7 @@ static int init_server_components()
{
DBUG_ENTER("init_server_components");
table_cache_init();
+ assign_cache_init();
hostname_cache_init();
query_cache_result_size_limit(query_cache_limit);
query_cache_set_min_res_unit(query_cache_min_res_unit);
@@ -2307,7 +2312,10 @@ Now disabling --log-slave-updates.");
}
if (opt_myisam_log)
(void) mi_log(1);
- ha_key_cache();
+
+ ha_key_cache(&dflt_key_cache_var);
+ process_key_caches(&ha_key_cache);
+
#if defined(HAVE_MLOCKALL) && defined(MCL_CURRENT)
if (locked_in_memory && !geteuid())
@@ -3573,7 +3581,9 @@ enum options_mysqld
OPT_FLUSH_TIME, OPT_FT_MIN_WORD_LEN,
OPT_FT_MAX_WORD_LEN, OPT_FT_MAX_WORD_LEN_FOR_SORT, OPT_FT_STOPWORD_FILE,
OPT_INTERACTIVE_TIMEOUT, OPT_JOIN_BUFF_SIZE,
- OPT_KEY_BUFFER_SIZE, OPT_LONG_QUERY_TIME,
+ OPT_KEY_BUFFER_SIZE, OPT_KEY_CACHE_BLOCK_SIZE,
+ OPT_KEY_CACHE_DIVISION_LIMIT, OPT_KEY_CACHE_AGE_THRESHOLD,
+ OPT_LONG_QUERY_TIME,
OPT_LOWER_CASE_TABLE_NAMES, OPT_MAX_ALLOWED_PACKET,
OPT_MAX_BINLOG_CACHE_SIZE, OPT_MAX_BINLOG_SIZE,
OPT_MAX_CONNECTIONS, OPT_MAX_CONNECT_ERRORS,
@@ -4286,10 +4296,26 @@ replicating a LOAD DATA INFILE command.",
IO_SIZE, 0},
{"key_buffer_size", OPT_KEY_BUFFER_SIZE,
"The size of the buffer used for index blocks for MyISAM tables. Increase this to get better index handling (for all reads and multiple writes) to as much as you can afford; 64M on a 256M machine that mainly runs MySQL is quite common.",
- (gptr*) &keybuff_size, (gptr*) &keybuff_size, 0,
+ (gptr*) &dflt_key_cache_var.buff_size,
+ (gptr*) &dflt_key_cache_var.buff_size, 0,
(enum get_opt_var_type) (GET_ULL | GET_ASK_ADDR),
REQUIRED_ARG, KEY_CACHE_SIZE, MALLOC_OVERHEAD, (long) ~0, MALLOC_OVERHEAD,
IO_SIZE, 0},
+ {"key_cache_block_size", OPT_KEY_CACHE_BLOCK_SIZE,
+ "The default size of key cache blocks",
+ (gptr*) &dflt_key_cache_var.block_size,
+ (gptr*) &dflt_key_cache_var.block_size, 0, GET_ULONG,
+ REQUIRED_ARG, KEY_CACHE_BLOCK_SIZE , 512, 1024*16, MALLOC_OVERHEAD, 512, 0},
+ {"key_cache_division_limit", OPT_KEY_CACHE_DIVISION_LIMIT,
+ "The minimum percentage of warm blocks in key cache",
+ (gptr*) &dflt_key_cache_var.division_limit,
+ (gptr*) &dflt_key_cache_var.division_limit, 0, GET_ULONG,
+ REQUIRED_ARG, 100, 1, 100, 0, 1, 0},
+ {"key_cache_division_age_threshold", OPT_KEY_CACHE_AGE_THRESHOLD,
+ "This characterizes the number of hits a hot block has to be untouched until it is considered aged enough to be downgraded to a warm block. This specifies the percentage ratio of that number of hits to the total number of blocks in key cache",
+ (gptr*) &dflt_key_cache_var.age_threshold,
+ (gptr*) &dflt_key_cache_var.age_threshold, 0, GET_ULONG,
+ REQUIRED_ARG, 300, 100, ~0L, 0, 100, 0},
{"long_query_time", OPT_LONG_QUERY_TIME,
"Log all queries that have taken more than long_query_time seconds to execute to file.",
(gptr*) &global_system_variables.long_query_time,
@@ -4707,13 +4733,19 @@ struct show_var_st status_vars[]= {
{"Handler_rollback", (char*) &ha_rollback_count, SHOW_LONG},
{"Handler_update", (char*) &ha_update_count, SHOW_LONG},
{"Handler_write", (char*) &ha_write_count, SHOW_LONG},
- {"Key_blocks_used", (char*) &my_blocks_used, SHOW_LONG_CONST},
- {"Key_read_requests", (char*) &my_cache_r_requests, SHOW_LONG},
- {"Key_reads", (char*) &my_cache_read, SHOW_LONG},
- {"Key_write_requests", (char*) &my_cache_w_requests, SHOW_LONG},
- {"Key_writes", (char*) &my_cache_write, SHOW_LONG},
+ {"Key_blocks_used", (char*) &dflt_key_cache_var.blocks_used,
+ SHOW_LONG_CONST},
+ {"Key_read_requests", (char*) &dflt_key_cache_var.cache_r_requests,
+ SHOW_LONG},
+ {"Key_reads", (char*) &dflt_key_cache_var.cache_read,
+ SHOW_LONG},
+ {"Key_write_requests", (char*) &dflt_key_cache_var.cache_w_requests,
+ SHOW_LONG},
+ {"Key_writes", (char*) &dflt_key_cache_var.cache_write,
+ SHOW_LONG},
{"Max_used_connections", (char*) &max_used_connections, SHOW_LONG},
- {"Not_flushed_key_blocks", (char*) &my_blocks_changed, SHOW_LONG_CONST},
+ {"Not_flushed_key_blocks", (char*) &dflt_key_cache_var.blocks_changed,
+ SHOW_LONG_CONST},
{"Not_flushed_delayed_rows", (char*) &delayed_rows_in_use, SHOW_LONG_CONST},
{"Open_tables", (char*) 0, SHOW_OPENTABLES},
{"Open_files", (char*) &my_file_opened, SHOW_LONG_CONST},
@@ -4925,7 +4957,8 @@ static void mysql_init_variables(void)
threads.empty();
thread_cache.empty();
key_caches.empty();
- if (!get_or_create_key_cache("default", 7))
+ if (!get_or_create_key_cache(DEFAULT_KEY_CACHE_NAME,
+ strlen(DEFAULT_KEY_CACHE_NAME)))
exit(1);
/* Initialize structures that is used when processing options */
@@ -5551,21 +5584,28 @@ extern "C" gptr *
mysql_getopt_value(const char *keyname, uint key_length,
const struct my_option *option)
{
- if (!key_length)
- {
- keyname= "default";
- key_length= 7;
- }
switch (option->id) {
case OPT_KEY_BUFFER_SIZE:
+ case OPT_KEY_CACHE_BLOCK_SIZE:
+ case OPT_KEY_CACHE_DIVISION_LIMIT:
+ case OPT_KEY_CACHE_AGE_THRESHOLD:
{
- KEY_CACHE *key_cache;
+ KEY_CACHE_VAR *key_cache;
if (!(key_cache= get_or_create_key_cache(keyname, key_length)))
exit(1);
- return (gptr*) &key_cache->size;
+ switch (option->id) {
+ case OPT_KEY_BUFFER_SIZE:
+ return (gptr*) &key_cache->buff_size;
+ case OPT_KEY_CACHE_BLOCK_SIZE:
+ return (gptr*) &key_cache->block_size;
+ case OPT_KEY_CACHE_DIVISION_LIMIT:
+ return (gptr*) &key_cache->division_limit;
+ case OPT_KEY_CACHE_AGE_THRESHOLD:
+ return (gptr*) &key_cache->age_threshold;
+ }
}
}
- return option->value;
+ return option->value;
}
@@ -5629,16 +5669,16 @@ static void get_options(int argc,char **argv)
table_alias_charset= (lower_case_table_names ?
files_charset_info :
&my_charset_bin);
- /* QQ To be deleted when we have key cache variables in a struct */
- {
- NAMED_LIST *not_used;
- keybuff_size= (((KEY_CACHE *) find_named(&key_caches, "default", 7,
- &not_used))->size);
- }
if (opt_short_log_format)
opt_specialflag|= SPECIAL_SHORT_LOG_FORMAT;
if (opt_log_queries_not_using_indexes)
opt_specialflag|= SPECIAL_LOG_QUERIES_NOT_USING_INDEXES;
+ /* Set up default values for a key cache */
+ KEY_CACHE_VAR *key_cache= &dflt_key_cache_var;
+ dflt_key_cache_block_size= key_cache->block_size;
+ dflt_key_buff_size= key_cache->buff_size;
+ dflt_key_cache_division_limit= key_cache->division_limit;
+ dflt_key_cache_age_threshold= key_cache->age_threshold;
}
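
The status counters above now point at the dflt_key_cache_var members but keep
their SQL-visible names, so the usual check still reports the default key cache
(output values omitted):

  SHOW STATUS LIKE 'Key%';
  -- Key_blocks_used, Key_read_requests, Key_reads, Key_write_requests,
  -- Key_writes, Not_flushed_key_blocks are now read from dflt_key_cache_var
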
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index b356bda6112..0ceeda9f5ef 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -895,10 +895,17 @@ static SEL_TREE *
get_mm_parts(PARAM *param, Field *field, Item_func::Functype type,
Item *value, Item_result cmp_type)
{
+ bool ne_func= FALSE;
DBUG_ENTER("get_mm_parts");
if (field->table != param->table)
DBUG_RETURN(0);
+ if (type == Item_func::NE_FUNC)
+ {
+ ne_func= TRUE;
+ type= Item_func::LT_FUNC;
+ }
+
KEY_PART *key_part = param->key_parts;
KEY_PART *end = param->key_parts_end;
SEL_TREE *tree=0;
@@ -933,6 +940,14 @@ get_mm_parts(PARAM *param, Field *field, Item_func::Functype type,
tree->keys[key_part->key]=sel_add(tree->keys[key_part->key],sel_arg);
}
}
+
+ if (ne_func)
+ {
+ SEL_TREE *tree2= get_mm_parts(param, field, Item_func::GT_FUNC,
+ value, cmp_type);
+ if (tree2)
+ tree= tree_or(param,tree,tree2);
+ }
DBUG_RETURN(tree);
}
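
With NE_FUNC now flagged as OPTIMIZE_KEY and get_mm_parts() building the union
of an LT_FUNC and a GT_FUNC tree, a '<>' predicate on an indexed column becomes
a candidate for a range scan over the two intervals around the excluded value.
A minimal illustration (table and column names are hypothetical):

  CREATE TABLE t1 (a INT, KEY(a));
  -- may now be resolved as range: (a < 10) OR (a > 10) instead of a full scan
  SELECT * FROM t1 WHERE a <> 10;
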
diff --git a/sql/set_var.cc b/sql/set_var.cc
index 2d200292726..5bebbce8532 100644
--- a/sql/set_var.cc
+++ b/sql/set_var.cc
@@ -60,6 +60,11 @@
#include "ha_innodb.h"
#endif
+ulonglong dflt_key_buff_size;
+uint dflt_key_cache_block_size;
+uint dflt_key_cache_division_limit;
+uint dflt_key_cache_age_threshold;
+
static HASH system_variable_hash;
const char *bool_type_names[]= { "OFF", "ON", NullS };
TYPELIB bool_typelib=
@@ -92,7 +97,7 @@ static void fix_myisam_max_sort_file_size(THD *thd, enum_var_type type);
static void fix_max_binlog_size(THD *thd, enum_var_type type);
static void fix_max_relay_log_size(THD *thd, enum_var_type type);
static void fix_max_connections(THD *thd, enum_var_type type);
-static KEY_CACHE *create_key_cache(const char *name, uint length);
+static KEY_CACHE_VAR *create_key_cache(const char *name, uint length);
void fix_sql_mode_var(THD *thd, enum_var_type type);
static byte *get_error_count(THD *thd);
static byte *get_warning_count(THD *thd);
@@ -143,6 +148,11 @@ sys_var_thd_ulong sys_interactive_timeout("interactive_timeout",
sys_var_thd_ulong sys_join_buffer_size("join_buffer_size",
&SV::join_buff_size);
sys_var_key_buffer_size sys_key_buffer_size("key_buffer_size");
+sys_var_key_cache_block_size sys_key_cache_block_size("key_cache_block_size");
+sys_var_key_cache_division_limit
+ sys_key_cache_division_limit("key_cache_division_limit");
+sys_var_key_cache_age_threshold
+ sys_key_cache_age_threshold("key_cache_age_threshold");
sys_var_bool_ptr sys_local_infile("local_infile",
&opt_local_infile);
sys_var_thd_bool sys_log_warnings("log_warnings", &SV::log_warnings);
@@ -422,6 +432,9 @@ sys_var *sys_variables[]=
&sys_interactive_timeout,
&sys_join_buffer_size,
&sys_key_buffer_size,
+ &sys_key_cache_block_size,
+ &sys_key_cache_division_limit,
+ &sys_key_cache_age_threshold,
&sys_last_insert_id,
&sys_local_infile,
&sys_log_binlog,
@@ -595,6 +608,12 @@ struct show_var_st init_vars[]= {
{sys_interactive_timeout.name,(char*) &sys_interactive_timeout, SHOW_SYS},
{sys_join_buffer_size.name, (char*) &sys_join_buffer_size, SHOW_SYS},
{sys_key_buffer_size.name, (char*) &sys_key_buffer_size, SHOW_SYS},
+ {sys_key_cache_block_size.name, (char*) &sys_key_cache_block_size,
+ SHOW_SYS},
+ {sys_key_cache_division_limit.name, (char*) &sys_key_cache_division_limit,
+ SHOW_SYS},
+ {sys_key_cache_age_threshold.name, (char*) &sys_key_cache_age_threshold,
+ SHOW_SYS},
{"language", language, SHOW_CHAR},
{"large_files_support", (char*) &opt_large_files, SHOW_BOOL},
{sys_local_infile.name, (char*) &sys_local_infile, SHOW_SYS},
@@ -1697,79 +1716,143 @@ void sys_var_collation_server::set_default(THD *thd, enum_var_type type)
}
+static LEX_STRING default_key_cache_base= {(char *) DEFAULT_KEY_CACHE_NAME, 7};
+
+static KEY_CACHE_VAR zero_key_cache=
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+
+static KEY_CACHE_VAR *get_key_cache(LEX_STRING *cache_name)
+{
+ if (!cache_name || !cache_name->str || !cache_name->length ||
+ cache_name->str == default_key_cache_base.str ||
+ (cache_name->length == default_key_cache_base.length &&
+ !memcmp(cache_name->str, default_key_cache_base.str,
+ default_key_cache_base.length)))
+ cache_name= &default_key_cache_base;
+ return ((KEY_CACHE_VAR*) find_named(&key_caches,
+ cache_name->str, cache_name->length,
+ 0));
+}
+
+byte *sys_var_key_cache_param::value_ptr(THD *thd, enum_var_type type,
+ LEX_STRING *base)
+{
+ KEY_CACHE_VAR *key_cache= get_key_cache(base);
+ if (!key_cache)
+ key_cache= &zero_key_cache;
+ return (byte*) key_cache + offset ;
+}
+
bool sys_var_key_buffer_size::update(THD *thd, set_var *var)
{
ulonglong tmp= var->save_result.ulonglong_value;
- NAMED_LIST *list;
LEX_STRING *base_name= &var->base;
-
if (!base_name->length)
- {
- /* We are using SET KEY_BUFFER_SIZE=# */
- base_name->str= (char*) "default";
- base_name->length= 7;
- }
- KEY_CACHE *key_cache= (KEY_CACHE*) find_named(&key_caches, base_name->str,
- base_name->length, &list);
+ base_name= &default_key_cache_base;
+ KEY_CACHE_VAR *key_cache= get_key_cache(base_name);
+
if (!key_cache)
{
if (!tmp) // Tried to delete cache
return 0; // Ok, nothing to do
if (!(key_cache= create_key_cache(base_name->str,
- base_name->length)))
+ base_name->length)))
return 1;
}
if (!tmp) // Zero size means delete
{
- /* Don't delete the default key cache */
- if (base_name->length != 7 || memcmp(base_name->str, "default", 7))
+ if (!key_cache->cache)
+ return 0;
+ /* Only non-default key caches may be deleted */
+ if (key_cache != &dflt_key_cache_var)
{
/*
- QQ: Here we should move tables that is using the found key cache
- to the default key cache
+ Move tables using this key cache to the default key cache
+ and remove this key cache if no tables are assigned to it
*/
+ NAMED_LIST *list;
+ key_cache= (KEY_CACHE_VAR *) find_named(&key_caches, base_name->str,
+ base_name->length, &list);
delete list;
+ int rc= reassign_keycache_tables(thd, key_cache,
+ default_key_cache_base.str, 1);
my_free((char*) key_cache, MYF(0));
- return 0;
+ return rc;
+
}
+ return 0;
}
- key_cache->size= (ulonglong) getopt_ull_limit_value(tmp, option_limits);
+ key_cache->buff_size= (ulonglong) getopt_ull_limit_value(tmp, option_limits);
+
+ if (!key_cache->cache)
+ return (bool)(ha_key_cache(key_cache));
+ else
+ return (bool)(ha_resize_key_cache(key_cache));
+}
- /* QQ: Needs to be updated when we have multiple key caches */
- keybuff_size= key_cache->size;
- ha_resize_key_cache();
+bool sys_var_key_cache_block_size::update(THD *thd, set_var *var)
+{
+ ulong tmp= var->value->val_int();
+ LEX_STRING *base_name= &var->base;
+ if (!base_name->length)
+ base_name= &default_key_cache_base;
+ KEY_CACHE_VAR *key_cache= get_key_cache(base_name);
+
+ if (!key_cache && !(key_cache= create_key_cache(base_name->str,
+ base_name->length)))
+ return 1;
+
+ key_cache->block_size= (ulong) getopt_ull_limit_value(tmp, option_limits);
+
+ if (key_cache->cache)
+ /* Do not build a new key cache here */
+ return (bool) (ha_resize_key_cache(key_cache));
return 0;
}
+bool sys_var_key_cache_division_limit::update(THD *thd, set_var *var)
+{
+ ulong tmp= var->value->val_int();
+ LEX_STRING *base_name= &var->base;
+ if (!base_name->length)
+ base_name= &default_key_cache_base;
+ KEY_CACHE_VAR *key_cache= get_key_cache(base_name);
+
+ if (!key_cache && !(key_cache= create_key_cache(base_name->str,
+ base_name->length)))
+ return 1;
+
+ key_cache->division_limit=
+ (ulong) getopt_ull_limit_value(tmp, option_limits);
-static ulonglong zero=0;
+ if (key_cache->cache)
+ /* Do not build a new key cache here */
+ return (bool) (ha_change_key_cache_param(key_cache));
+ return 0;
+}
-byte *sys_var_key_buffer_size::value_ptr(THD *thd, enum_var_type type,
- LEX_STRING *base)
+bool sys_var_key_cache_age_threshold::update(THD *thd, set_var *var)
{
- const char *name;
- uint length;
- KEY_CACHE *key_cache;
- NAMED_LIST *not_used;
+ ulong tmp= var->value->val_int();
+ LEX_STRING *base_name= &var->base;
+ if (!base_name->length)
+ base_name= &default_key_cache_base;
+ KEY_CACHE_VAR *key_cache= get_key_cache(base_name);
+
+ if (!key_cache && !(key_cache= create_key_cache(base_name->str,
+ base_name->length)))
+ return 1;
+
+ key_cache->age_threshold=
+ (ulong) getopt_ull_limit_value(tmp, option_limits);
- if (!base->str)
- {
- name= "default";
- length= 7;
- }
- else
- {
- name= base->str;
- length= base->length;
- }
- key_cache= (KEY_CACHE*) find_named(&key_caches, name, length, &not_used);
- if (!key_cache)
- return (byte*) &zero;
- return (byte*) &key_cache->size;
+ if (key_cache->cache)
+ /* Do not build a new key cache here */
+ return (bool) (ha_change_key_cache_param(key_cache));
+ return 0;
}
-
/*****************************************************************************
@@ -2355,7 +2438,8 @@ gptr find_named(I_List<NAMED_LIST> *list, const char *name, uint length,
{
if (element->cmp(name, length))
{
- *found= element;
+ if (found)
+ *found= element;
return element->data;
}
}
@@ -2378,29 +2462,41 @@ void delete_elements(I_List<NAMED_LIST> *list, void (*free_element)(gptr))
/* Key cache functions */
-static KEY_CACHE *create_key_cache(const char *name, uint length)
+static KEY_CACHE_VAR *create_key_cache(const char *name, uint length)
{
- KEY_CACHE *key_cache;
+ KEY_CACHE_VAR *key_cache;
DBUG_PRINT("info",("Creating key cache: %.*s length: %d", length, name,
length));
- if ((key_cache= (KEY_CACHE*) my_malloc(sizeof(KEY_CACHE),
- MYF(MY_ZEROFILL | MY_WME))))
+ if (length != default_key_cache_base.length ||
+ memcmp(name, default_key_cache_base.str, length))
{
- if (!new NAMED_LIST(&key_caches, name, length, (gptr) key_cache))
+ if ((key_cache= (KEY_CACHE_VAR*) my_malloc(sizeof(KEY_CACHE_VAR),
+ MYF(MY_ZEROFILL | MY_WME))))
{
- my_free((char*) key_cache, MYF(0));
- key_cache= 0;
+ if (!new NAMED_LIST(&key_caches, name, length, (gptr) key_cache))
+ {
+ my_free((char*) key_cache, MYF(0));
+ key_cache= 0;
+ }
}
}
+ else
+ {
+ key_cache= &dflt_key_cache_var;
+ if (!new NAMED_LIST(&key_caches, name, length, (gptr) key_cache))
+ key_cache= 0;
+ }
+
return key_cache;
}
-KEY_CACHE *get_or_create_key_cache(const char *name, uint length)
+KEY_CACHE_VAR *get_or_create_key_cache(const char *name, uint length)
{
- NAMED_LIST *not_used;
- KEY_CACHE *key_cache= (KEY_CACHE*) find_named(&key_caches, name,
- length, &not_used);
+ LEX_STRING key_cache_name;
+ key_cache_name.str= (char *) name;
+ key_cache_name.length= length;
+ KEY_CACHE_VAR *key_cache= get_key_cache(&key_cache_name);
if (!key_cache)
key_cache= create_key_cache(name, length);
return key_cache;
@@ -2409,7 +2505,22 @@ KEY_CACHE *get_or_create_key_cache(const char *name, uint length)
void free_key_cache(gptr key_cache)
{
- my_free(key_cache, MYF(0));
+ if (key_cache != (gptr) &dflt_key_cache_var)
+ my_free(key_cache, MYF(0));
+}
+
+bool process_key_caches(int (* func) (KEY_CACHE_VAR *))
+{
+
+ I_List_iterator<NAMED_LIST> it(key_caches);
+ NAMED_LIST *element;
+ while ((element= it++))
+ {
+ KEY_CACHE_VAR *key_cache= (KEY_CACHE_VAR *) element->data;
+ if (key_cache != &dflt_key_cache_var)
+ func(key_cache);
+ }
+ return 0;
}
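
Taken together, the update() methods above let a named key cache be created,
tuned and dropped entirely through SET; a minimal sketch, assuming the
structured-variable form cache_name.variable_name (the cache name hot_cache is
hypothetical):

  SET GLOBAL hot_cache.key_buffer_size= 4*1024*1024;  -- creates the cache
  SET GLOBAL hot_cache.key_cache_block_size= 2048;    -- per-cache tuning
  SET GLOBAL hot_cache.key_buffer_size= 0;
  -- dropping a non-default cache reassigns its tables to the default
  -- cache via reassign_keycache_tables()
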
diff --git a/sql/set_var.h b/sql/set_var.h
index 752f275c9f2..16b2c1d5d37 100644
--- a/sql/set_var.h
+++ b/sql/set_var.h
@@ -30,6 +30,11 @@ class set_var;
typedef struct system_variables SV;
extern TYPELIB bool_typelib, delay_key_write_typelib, sql_mode_typelib;
+extern ulonglong dflt_key_buff_size;
+extern uint dflt_key_cache_block_size;
+extern uint dflt_key_cache_division_limit;
+extern uint dflt_key_cache_age_threshold;
+
enum enum_var_type
{
OPT_DEFAULT, OPT_SESSION, OPT_GLOBAL
@@ -546,15 +551,71 @@ public:
byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base);
};
-class sys_var_key_buffer_size :public sys_var
+class sys_var_key_cache_param :public sys_var
{
+protected:
+ uint offset;
public:
- sys_var_key_buffer_size(const char *name_arg)
+ sys_var_key_cache_param(const char *name_arg)
:sys_var(name_arg)
- {}
+ {
+ offset= 0;
+ }
+ byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base);
+};
+
+class sys_var_key_buffer_size :public sys_var_key_cache_param
+{
+public:
+ sys_var_key_buffer_size(const char *name_arg)
+ :sys_var_key_cache_param(name_arg)
+ {
+ offset= offsetof(KEY_CACHE_VAR, buff_size);
+ }
bool update(THD *thd, set_var *var);
SHOW_TYPE type() { return SHOW_LONGLONG; }
- byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base);
+ bool check_default(enum_var_type type) { return 1; }
+ bool is_struct() { return 1; }
+};
+
+class sys_var_key_cache_block_size :public sys_var_key_cache_param
+{
+public:
+ sys_var_key_cache_block_size(const char *name_arg)
+ :sys_var_key_cache_param(name_arg)
+ {
+ offset= offsetof(KEY_CACHE_VAR, block_size);
+ }
+ bool update(THD *thd, set_var *var);
+ SHOW_TYPE type() { return SHOW_LONG; }
+ bool check_default(enum_var_type type) { return 1; }
+ bool is_struct() { return 1; }
+};
+
+class sys_var_key_cache_division_limit :public sys_var_key_cache_param
+{
+public:
+ sys_var_key_cache_division_limit(const char *name_arg)
+ :sys_var_key_cache_param(name_arg)
+ {
+ offset= offsetof(KEY_CACHE_VAR, division_limit);
+ }
+ bool update(THD *thd, set_var *var);
+ SHOW_TYPE type() { return SHOW_LONG; }
+ bool check_default(enum_var_type type) { return 1; }
+ bool is_struct() { return 1; }
+};
+
+class sys_var_key_cache_age_threshold :public sys_var_key_cache_param
+{
+public:
+ sys_var_key_cache_age_threshold(const char *name_arg)
+ :sys_var_key_cache_param(name_arg)
+ {
+ offset= offsetof(KEY_CACHE_VAR, age_threshold);
+ }
+ bool update(THD *thd, set_var *var);
+ SHOW_TYPE type() { return SHOW_LONG; }
bool check_default(enum_var_type type) { return 1; }
bool is_struct() { return 1; }
};
@@ -785,5 +846,6 @@ gptr find_named(I_List<NAMED_LIST> *list, const char *name, uint length,
void delete_elements(I_List<NAMED_LIST> *list, void (*free_element)(gptr));
/* key_cache functions */
-KEY_CACHE *get_or_create_key_cache(const char *name, uint length);
+KEY_CACHE_VAR *get_or_create_key_cache(const char *name, uint length);
void free_key_cache(gptr key_cache);
+bool process_key_caches(int (* func) (KEY_CACHE_VAR *));
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index a926c6e66fe..53f430924d7 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -30,6 +30,7 @@
TABLE *unused_tables; /* Used by mysql_test */
HASH open_cache; /* Used by mysql_test */
+HASH assign_cache;
static int open_unireg_entry(THD *thd,TABLE *entry,const char *db,
const char *name, const char *alias);
@@ -53,7 +54,6 @@ void table_cache_init(void)
mysql_rm_tmp_tables();
}
-
void table_cache_free(void)
{
DBUG_ENTER("table_cache_free");
@@ -63,7 +63,6 @@ void table_cache_free(void)
DBUG_VOID_RETURN;
}
-
uint cached_tables(void)
{
return open_cache.records;
@@ -762,6 +761,8 @@ TABLE *open_table(THD *thd,const char *db,const char *table_name,
reg1 TABLE *table;
char key[MAX_DBKEY_LENGTH];
uint key_length;
+ KEY_CACHE_ASMT *key_cache_asmt;
+ KEY_CACHE_VAR *key_cache;
DBUG_ENTER("open_table");
/* find a unused table in the open table cache */
@@ -802,6 +803,77 @@ TABLE *open_table(THD *thd,const char *db,const char *table_name,
my_printf_error(ER_TABLE_NOT_LOCKED,ER(ER_TABLE_NOT_LOCKED),MYF(0),alias);
DBUG_RETURN(0);
}
+
+ VOID(pthread_mutex_lock(&LOCK_assign));
+ key_cache_asmt= (KEY_CACHE_ASMT*) hash_search(&assign_cache,
+ (byte*) key, key_length) ;
+ if (thd->open_options & HA_OPEN_TO_ASSIGN)
+ {
+ /* When executing a CACHE INDEX command */
+ if (key_cache_asmt)
+ {
+ if (key_cache_asmt->requests++)
+ {
+ /* Another thread is assigning this table to some key cache */
+
+ /* Put the assignment request into the queue of such requests */
+ struct st_my_thread_var *last;
+ struct st_my_thread_var *thread= thd->mysys_var;
+ if (! (last= key_cache_asmt->queue))
+ thread->next= thread;
+ else
+ {
+ thread->next= last->next;
+ last->next= thread;
+ }
+ key_cache_asmt->queue= thread;
+
+ /* Wait until the request can be processed */
+ do
+ {
+ VOID(pthread_cond_wait(&thread->suspend, &LOCK_assign));
+ }
+ while (thread->next);
+ }
+ }
+ else
+ {
+ /*
+ The table has not been explicitly assigned to any key cache yet;
+ by default it is assigned to the default key cache.
+ */
+
+ if (!(key_cache_asmt=
+ (KEY_CACHE_ASMT *) my_malloc(sizeof(*key_cache_asmt),
+ MYF(MY_WME | MY_ZEROFILL))) ||
+ !(key_cache_asmt->db_name= my_strdup(db, MYF(MY_WME))) ||
+ !(key_cache_asmt->table_name= my_strdup(table_name, MYF(MY_WME))) ||
+ !(key_cache_asmt->table_key= my_memdup((const byte *) key,
+ key_length, MYF(MY_WME))))
+ {
+ VOID(pthread_mutex_unlock(&LOCK_assign));
+
+ if (key_cache_asmt)
+ {
+ if (key_cache_asmt->db_name)
+ my_free((gptr) key_cache_asmt->db_name, MYF(0));
+ if (key_cache_asmt->table_name)
+ my_free((gptr) key_cache_asmt->table_name, MYF(0));
+ my_free((gptr) key_cache_asmt, MYF(0));
+ }
+ DBUG_RETURN(NULL);
+ }
+ key_cache_asmt->key_length= key_length;
+ key_cache_asmt->key_cache= &dflt_key_cache_var;
+ VOID(my_hash_insert(&assign_cache, (byte *) key_cache_asmt));
+ key_cache_asmt->requests++;
+ }
+ key_cache_asmt->to_reassign= 0;
+ }
+
+ key_cache= key_cache_asmt ? key_cache_asmt->key_cache : &dflt_key_cache_var;
+ VOID(pthread_mutex_unlock(&LOCK_assign));
+
VOID(pthread_mutex_lock(&LOCK_open));
if (!thd->open_tables)
@@ -844,6 +916,9 @@ TABLE *open_table(THD *thd,const char *db,const char *table_name,
}
table->prev->next=table->next; /* Remove from unused list */
table->next->prev=table->prev;
+
+ table->key_cache= key_cache;
+ table->key_cache_asmt= key_cache_asmt;
}
else
{
@@ -857,6 +932,8 @@ TABLE *open_table(THD *thd,const char *db,const char *table_name,
VOID(pthread_mutex_unlock(&LOCK_open));
DBUG_RETURN(NULL);
}
+ table->key_cache= key_cache;
+ table->key_cache_asmt= key_cache_asmt;
if (open_unireg_entry(thd, table,db,table_name,alias) ||
!(table->table_cache_key=memdup_root(&table->mem_root,(char*) key,
key_length)))
@@ -875,6 +952,8 @@ TABLE *open_table(THD *thd,const char *db,const char *table_name,
table->in_use=thd;
check_unused();
+
+
VOID(pthread_mutex_unlock(&LOCK_open));
if (refresh)
{
@@ -1646,6 +1725,54 @@ bool rm_temporary_table(enum db_type base, char *path)
DBUG_RETURN(error);
}
+static void free_assign_entry(KEY_CACHE_ASMT *key_cache_asmt)
+{
+ DBUG_ENTER("free_assign_entry");
+ my_free((gptr) key_cache_asmt->table_key, MYF(0));
+ my_free((gptr) key_cache_asmt, MYF(0));
+ DBUG_VOID_RETURN;
+}
+
+static byte *assign_cache_key(const byte *record,uint *length,
+ my_bool not_used __attribute__((unused)))
+{
+ KEY_CACHE_ASMT *entry=(KEY_CACHE_ASMT *) record;
+ *length=entry->key_length;
+ return (byte*) entry->table_key;
+}
+
+void assign_cache_init(void)
+{
+ VOID(hash_init(&assign_cache, &my_charset_bin,
+ table_cache_size+16, 0, 0, assign_cache_key,
+ (hash_free_key) free_assign_entry,0));
+}
+
+void assign_cache_free(void)
+{
+ DBUG_ENTER("assign_cache_free");
+ hash_free(&assign_cache);
+ DBUG_VOID_RETURN;
+}
+
+void reassign_key_cache(KEY_CACHE_ASMT *key_cache_asmt,
+ KEY_CACHE_VAR *new_key_cache)
+{
+ if (key_cache_asmt->prev)
+ {
+ /* Unlink key_cache_asmt from the assignment list for the old key cache */
+ if ((*key_cache_asmt->prev= key_cache_asmt->next))
+ key_cache_asmt->next->prev= key_cache_asmt->prev;
+ }
+ /* Link key_cache_asmt into the assignment list for the new key cache */
+ key_cache_asmt->prev= &new_key_cache->assign_list;
+ if ((key_cache_asmt->next= new_key_cache->assign_list))
+ key_cache_asmt->next->prev= &key_cache_asmt->next;
+ new_key_cache->assign_list= key_cache_asmt;
+
+ key_cache_asmt->key_cache= new_key_cache;
+}
+
/*****************************************************************************
** find field in list or tables. if field is unqualifed and unique,
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index 352a79843a9..c3ff69184e6 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -1218,7 +1218,16 @@ TABLE_LIST* st_select_lex_node::get_table_list() { return 0; }
List<Item>* st_select_lex_node::get_item_list() { return 0; }
List<String>* st_select_lex_node::get_use_index() { return 0; }
List<String>* st_select_lex_node::get_ignore_index() { return 0; }
-
+TABLE_LIST *st_select_lex_node::add_table_to_list(THD *thd, Table_ident *table,
+ LEX_STRING *alias,
+ ulong table_join_options,
+ thr_lock_type flags,
+ List<String> *use_index,
+ List<String> *ignore_index,
+ LEX_STRING *option)
+{
+ return 0;
+}
ulong st_select_lex_node::get_table_join_options()
{
return 0;
@@ -1242,6 +1251,28 @@ bool st_select_lex::test_limit()
return(0);
}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
/*
Interface method of table list creation for query
diff --git a/sql/sql_lex.h b/sql/sql_lex.h
index 7545f525082..c31420b951c 100644
--- a/sql/sql_lex.h
+++ b/sql/sql_lex.h
@@ -59,7 +59,8 @@ enum enum_sql_command {
SQLCOM_CHANGE_DB, SQLCOM_CREATE_DB, SQLCOM_DROP_DB, SQLCOM_ALTER_DB,
SQLCOM_REPAIR, SQLCOM_REPLACE, SQLCOM_REPLACE_SELECT,
SQLCOM_CREATE_FUNCTION, SQLCOM_DROP_FUNCTION,
- SQLCOM_REVOKE,SQLCOM_OPTIMIZE, SQLCOM_CHECK, SQLCOM_PRELOAD_KEYS,
+ SQLCOM_REVOKE,SQLCOM_OPTIMIZE, SQLCOM_CHECK,
+ SQLCOM_ASSIGN_TO_KEYCACHE, SQLCOM_PRELOAD_KEYS,
SQLCOM_FLUSH, SQLCOM_KILL, SQLCOM_ANALYZE,
SQLCOM_ROLLBACK, SQLCOM_ROLLBACK_TO_SAVEPOINT,
SQLCOM_COMMIT, SQLCOM_SAVEPOINT,
@@ -257,6 +258,13 @@ public:
virtual List<String>* get_use_index();
virtual List<String>* get_ignore_index();
virtual ulong get_table_join_options();
+ virtual TABLE_LIST *add_table_to_list(THD *thd, Table_ident *table,
+ LEX_STRING *alias,
+ ulong table_options,
+ thr_lock_type flags= TL_UNLOCK,
+ List<String> *use_index= 0,
+ List<String> *ignore_index= 0,
+ LEX_STRING *option= 0);
virtual void set_lock_for_tables(thr_lock_type lock_type) {}
friend class st_select_lex_unit;
@@ -443,8 +451,8 @@ public:
ulong table_options,
thr_lock_type flags= TL_UNLOCK,
List<String> *use_index= 0,
- List<String> *ignore_index= 0);
-
+ List<String> *ignore_index= 0,
+ LEX_STRING *option= 0);
TABLE_LIST* get_table_list();
List<Item>* get_item_list();
List<String>* get_use_index();
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 36a7d19a3c5..596c24541fb 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -1958,10 +1958,20 @@ mysql_execute_command(THD *thd)
res = mysql_restore_table(thd, tables);
break;
}
+ case SQLCOM_ASSIGN_TO_KEYCACHE:
+ {
+ if (check_db_used(thd, tables) ||
+ check_access(thd, INDEX_ACL, tables->db,
+ &tables->grant.privilege, 0, 0))
+ goto error;
+ res = mysql_assign_to_keycache(thd, tables);
+ break;
+ }
case SQLCOM_PRELOAD_KEYS:
{
if (check_db_used(thd, tables) ||
- check_access(thd, INDEX_ACL, tables->db, &tables->grant.privilege,0,0))
+ check_access(thd, INDEX_ACL, tables->db,
+ &tables->grant.privilege, 0, 0))
goto error;
res = mysql_preload_keys(thd, tables);
break;
@@ -4264,7 +4274,8 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd,
ulong table_options,
thr_lock_type lock_type,
List<String> *use_index,
- List<String> *ignore_index)
+ List<String> *ignore_index,
+ LEX_STRING *option)
{
register TABLE_LIST *ptr;
char *alias_str;
@@ -4325,7 +4336,7 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd,
if (ignore_index)
ptr->ignore_index=(List<String> *) thd->memdup((gptr) ignore_index,
sizeof(*ignore_index));
-
+ ptr->option= option ? option->str : 0;
/* check that used name is unique */
if (lock_type != TL_IGNORE)
{
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 3d07c7e04de..ba172aa6bda 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -1588,6 +1588,112 @@ int mysql_optimize_table(THD* thd, TABLE_LIST* tables, HA_CHECK_OPT* check_opt)
/*
+ Assign specified indexes for a table to a key cache
+
+ SYNOPSIS
+ mysql_assign_to_keycache()
+ thd Thread object
+ tables Table list (one table only)
+
+ RETURN VALUES
+ 0 ok
+ -1 error
+*/
+
+int mysql_assign_to_keycache(THD* thd, TABLE_LIST* tables)
+{
+ DBUG_ENTER("mysql_assign_to_keycache");
+ DBUG_RETURN(mysql_admin_table(thd, tables, 0,
+ "assign_to_keycache", TL_READ, 0,
+ HA_OPEN_TO_ASSIGN, 0,
+ &handler::assign_to_keycache));
+}
+
+
+/*
+ Reassign all tables assigned to a key cache to another key cache
+
+ SYNOPSIS
+ reassign_keycache_tables()
+ thd Thread object
+ src_cache Reference to the key cache to clean up
+ dest_name Name of the cache to assign tables to
+ remove_fl Flag to destroy key cache when all tables are reassigned
+
+ RETURN VALUES
+ 0 ok
+ -1 error
+*/
+
+int reassign_keycache_tables(THD* thd, KEY_CACHE_VAR* src_cache,
+ char *dest_name, bool remove_fl)
+{
+ int rc= 0;
+ TABLE_LIST table;
+ KEY_CACHE_ASMT *key_cache_asmt;
+
+ DBUG_ENTER("reassign_keycache_tables");
+
+ VOID(pthread_mutex_lock(&LOCK_assign));
+ for (key_cache_asmt= src_cache->assign_list ;
+ key_cache_asmt;
+ key_cache_asmt= key_cache_asmt->next)
+ key_cache_asmt->to_reassign = 1;
+ key_cache_asmt= src_cache->assign_list;
+ while (key_cache_asmt)
+ {
+ if (key_cache_asmt->to_reassign)
+ {
+ bool refresh;
+ VOID(pthread_mutex_unlock(&LOCK_assign));
+ bzero((byte *) &table, sizeof(table));
+ table.option= dest_name;
+ table.db= key_cache_asmt->db_name;
+ table.alias= table.real_name= key_cache_asmt->table_name;
+ thd->open_options|= HA_OPEN_TO_ASSIGN;
+ while (!(table.table=open_table(thd,table.db,
+ table.real_name,table.alias,
+ &refresh)) && refresh) ;
+ thd->open_options&= ~HA_OPEN_TO_ASSIGN;
+ if (!table.table)
+ DBUG_RETURN(-1);
+ table.table->pos_in_table_list= &table;
+ key_cache_asmt->triggered= 1;
+ rc= table.table->file->assign_to_keycache(thd, 0);
+ close_thread_tables(thd);
+ if (rc)
+ DBUG_RETURN(rc);
+ VOID(pthread_mutex_lock(&LOCK_assign));
+ key_cache_asmt= src_cache->assign_list;
+ continue;
+ }
+ else
+ key_cache_asmt= key_cache_asmt->next;
+ }
+
+ while (src_cache->assignments)
+ {
+ struct st_my_thread_var *waiting_thread= my_thread_var;
+ pthread_cond_wait(&waiting_thread->suspend, &LOCK_assign);
+ }
+ if (src_cache->extra_info)
+ {
+ my_free((char *) src_cache->extra_info, MYF(0));
+ src_cache->extra_info= 0;
+ }
+
+ if (remove_fl && !src_cache->assign_list && src_cache != &dflt_key_cache_var)
+ {
+ end_key_cache(&src_cache->cache, 1);
+ src_cache->buff_size= 0;
+ src_cache->block_size= 0;
+ }
+ VOID(pthread_mutex_unlock(&LOCK_assign));
+ DBUG_RETURN(0);
+}
+
+
+/*
Preload specified indexes for a table into key cache
SYNOPSIS
diff --git a/sql/sql_test.cc b/sql/sql_test.cc
index 112d42e4643..0ffdd5f1bcf 100644
--- a/sql/sql_test.cc
+++ b/sql/sql_test.cc
@@ -322,8 +322,9 @@ w_requests: %10lu\n\
writes: %10lu\n\
r_requests: %10lu\n\
reads: %10lu\n",
- my_blocks_used,my_blocks_changed,my_cache_w_requests,
- my_cache_write,my_cache_r_requests,my_cache_read);
+ dflt_key_cache_var.blocks_used,dflt_key_cache_var.blocks_changed,
+ dflt_key_cache_var.cache_w_requests,dflt_key_cache_var.cache_write,
+ dflt_key_cache_var.cache_r_requests,dflt_key_cache_var.cache_read);
pthread_mutex_unlock(&THR_LOCK_keycache);
if (thd)
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index c7a27592a7b..c2a014f915d 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -683,12 +683,12 @@ bool my_yyoverflow(short **a, YYSTYPE **b,int *yystacksize);
%type <NONE>
query verb_clause create change select do drop insert replace insert2
insert_values update delete truncate rename
- show describe load alter optimize preload flush
+ show describe load alter optimize keycache preload flush
reset purge begin commit rollback savepoint
slave master_def master_defs master_file_def
repair restore backup analyze check start checksum
field_list field_list_item field_spec kill column_def key_def
- preload_list preload_keys
+ keycache_list assign_to_keycache preload_list preload_keys
select_item_list select_item values_list no_braces
opt_limit_clause delete_limit_clause fields opt_values values
procedure_list procedure_list2 procedure_item
@@ -760,6 +760,7 @@ verb_clause:
| load
| lock
| optimize
+ | keycache
| preload
| purge
| rename
@@ -1984,6 +1985,45 @@ table_to_table:
YYABORT;
};
+keycache:
+ CACHE_SYM INDEX
+ {
+ LEX *lex=Lex;
+ lex->sql_command=SQLCOM_ASSIGN_TO_KEYCACHE;
+ }
+ keycache_list
+ {}
+ ;
+
+keycache_list:
+ assign_to_keycache
+ | keycache_list ',' assign_to_keycache;
+
+assign_to_keycache:
+ table_ident cache_keys_spec IN_SYM ident
+ {
+ LEX *lex=Lex;
+ SELECT_LEX *sel= &lex->select_lex;
+ if (!sel->add_table_to_list(lex->thd, $1, NULL, 0,
+ TL_READ,
+ sel->get_use_index(),
+ (List<String> *)0,
+ &($4)))
+ YYABORT;
+ }
+ |
+ table_ident cache_keys_spec IN_SYM DEFAULT
+ {
+ LEX *lex=Lex;
+ SELECT_LEX *sel= &lex->select_lex;
+ if (!sel->add_table_to_list(lex->thd, $1, NULL, 0,
+ TL_READ,
+ sel->get_use_index(),
+ (List<String> *)0))
+ YYABORT;
+ }
+ ;
+
preload:
LOAD INDEX INTO CACHE_SYM
{
@@ -1999,7 +2039,7 @@ preload_list:
| preload_list ',' preload_keys;
preload_keys:
- table_ident preload_keys_spec opt_ignore_leaves
+ table_ident cache_keys_spec opt_ignore_leaves
{
LEX *lex=Lex;
SELECT_LEX *sel= &lex->select_lex;
@@ -2011,18 +2051,18 @@ preload_keys:
}
;
-preload_keys_spec:
- keys_or_index { Select->interval_list.empty(); }
- preload_key_list_or_empty
- {
- LEX *lex=Lex;
- SELECT_LEX *sel= &lex->select_lex;
- sel->use_index= sel->interval_list;
- sel->use_index_ptr= &sel->use_index;
- }
- ;
+cache_keys_spec:
+ keys_or_index { Select->interval_list.empty(); }
+ cache_key_list_or_empty
+ {
+ LEX *lex=Lex;
+ SELECT_LEX *sel= &lex->select_lex;
+ sel->use_index= sel->interval_list;
+ sel->use_index_ptr= &sel->use_index;
+ }
+ ;
-preload_key_list_or_empty:
+cache_key_list_or_empty:
/* empty */
| '(' key_usage_list2 ')' {}
;
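
As far as these rules define it, the resulting statement is a comma-separated
keycache_list, each entry naming a table, an optional key list after the
keys_or_index keyword, and a target cache or DEFAULT; an illustrative
statement (table, index and cache names are hypothetical):

  CACHE INDEX t1 INDEX (i1, i2) IN hot_cache, t2 INDEX IN DEFAULT;
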
diff --git a/sql/table.h b/sql/table.h
index 7b4e5745732..b9c6a72bb09 100644
--- a/sql/table.h
+++ b/sql/table.h
@@ -55,6 +55,31 @@ typedef struct st_filesort_info
ha_rows found_records; /* How many records in sort */
} FILESORT_INFO;
+
+/* Table key cache assignment descriptor */
+/*
+ In the future a similar structure will be used to assign an individual
+ index to a key cache: the index name will be added, as will the name
+ of the database catalog.
+ The descriptors for the current assignments are kept in the
+ assignment cache, assign_cache. If a table is not found in this cache,
+ it is considered assigned to the default key cache.
+*/
+typedef struct st_key_cache_asmt
+{
+ char *db_name; /* db the table belongs to */
+ char *table_name; /* the name of the table */
+ char *table_key; /* key for the assignment cache */
+ uint key_length; /* the length of this key */
+ struct st_key_cache_var *key_cache; /* reference to the key cache */
+ struct st_key_cache_asmt **prev; /* links in the chain of all assignments */
+ struct st_key_cache_asmt *next; /* to this cache */
+ struct st_my_thread_var *queue; /* queue of requests for assignment */
+ uint requests; /* number of current requests */
+ bool to_reassign; /* marked when the whole cache is being reassigned */
+ bool triggered; /* marked when assignment is triggered*/
+} KEY_CACHE_ASMT;
+
/* Table cache entry struct */
class Field_timestamp;
@@ -62,11 +87,13 @@ class Field_blob;
struct st_table {
handler *file;
- Field **field; /* Pointer to fields */
+ KEY_CACHE_VAR *key_cache; /* Ref to the key cache the table assigned to*/
+ KEY_CACHE_ASMT *key_cache_asmt;/* Only when opened for key cache assignment */
+ Field **field; /* Pointer to fields */
Field_blob **blob_field; /* Pointer to blob fields */
HASH name_hash; /* hash of field names */
byte *record[2]; /* Pointer to records */
- byte *default_values; /* record with default values for INSERT */
+ byte *default_values; /* Record with default values for INSERT */
byte *insert_values; /* used by INSERT ... UPDATE */
uint fields; /* field count */
uint reclength; /* Recordlength */
@@ -161,6 +188,7 @@ typedef struct st_table_list
{
struct st_table_list *next;
char *db, *alias, *real_name;
+ char *option; /* Used by cache index */
Item *on_expr; /* Used with outer join */
struct st_table_list *natural_join; /* natural join on this table*/
/* ... join ... USE INDEX ... IGNORE INDEX */
@@ -192,3 +220,5 @@ typedef struct st_open_table_list
char *db,*table;
uint32 in_use,locked;
} OPEN_TABLE_LIST;
+
+