diff options
author | unknown <monty@narttu.mysql.fi> | 2003-03-25 02:37:41 +0200 |
---|---|---|
committer | unknown <monty@narttu.mysql.fi> | 2003-03-25 02:37:41 +0200 |
commit | 11841c05ec1b2db812e9827e9206551e42d9e87d (patch) | |
tree | 6b0e070b3d93ce9b3d3f7d622757752b48914aab /mysys/mf_keycache.c | |
parent | f33c97281a9861467553bbaaa28a0b205dfcd291 (diff) | |
download | mariadb-git-11841c05ec1b2db812e9827e9206551e42d9e87d.tar.gz |
Fixed bug in memory allocation in key cache. (Memory was not properly aligned, which caused core dumps on SPARC CPUs)
Changed keycache variables to start with my_ instead of _my_
include/my_sys.h:
Changed keycache variables to start with my_ instead of _my_
myisam/mi_test2.c:
Changed keycache variables to start with my_ instead of _my_
Removed compiler warnings
myisam/sp_test.c:
Removed compiler warning
mysys/mf_keycache.c:
Fixed bug in memory allocation (Memory was not properly aligned, which caused core dumps on SPARC CPUs)
Changed keycache variables to start with my_ instead of _my_
Fixed indentation and comment syntax.
Removed end space.
sql/mysqld.cc:
Changed keycache variables to start with my_ instead of _my_
sql/sql_test.cc:
Changed keycache variables to start with my_ instead of _my_
Diffstat (limited to 'mysys/mf_keycache.c')
-rw-r--r-- | mysys/mf_keycache.c | 937 |
1 files changed, 494 insertions, 443 deletions
diff --git a/mysys/mf_keycache.c b/mysys/mf_keycache.c index 482a594fa73..45cbcdb3ab7 100644 --- a/mysys/mf_keycache.c +++ b/mysys/mf_keycache.c @@ -29,42 +29,42 @@ #include <assert.h> #include <stdarg.h> -/* - Some compilation flags have been added specifically for this module - to control the following: - - not to let a thread to yield the control when reading directly - from key cache, which might improve performance in many cases; - to enable this add: - #define SERIALIZED_READ_FROM_CACHE - - to set an upper bound for number of threads simultaneously - using the key cache; this setting helps to determine an optimal - size for hash table and improve performance when the number of - blocks in the key cache much less than the number of threads - accessing it; - to set this number equal to <N> add - #define MAX_THREADS <N> - - to substitute calls of pthread_cond_wait for calls of - pthread_cond_timedwait (wait with timeout set up); - this setting should be used only when you want to trap a deadlock - situation, which theoretically should not happen; - to set timeout equal to <T> seconds add - #define KEYCACHE_TIMEOUT <T> - - to enable the module traps and to send debug information from - key cache module to a special debug log add: - #define KEYCACHE_DEBUG - the name of this debug log file <LOG NAME> can be set through: - #define KEYCACHE_DEBUG_LOG <LOG NAME> - if the name is not defined, it's set by default; - if the KEYCACHE_DEBUG flag is not set up and we are in a debug - mode, i.e. when ! defined(DBUG_OFF), the debug information from the - module is sent to the regular debug log. 
- - Example of the settings: - #define SERIALIZED_READ_FROM_CACHE - #define MAX_THREADS 100 - #define KEYCACHE_TIMEOUT 1 - #define KEYCACHE_DEBUG - #define KEYCACHE_DEBUG_LOG "my_key_cache_debug.log" +/* + Some compilation flags have been added specifically for this module + to control the following: + - not to let a thread to yield the control when reading directly + from key cache, which might improve performance in many cases; + to enable this add: + #define SERIALIZED_READ_FROM_CACHE + - to set an upper bound for number of threads simultaneously + using the key cache; this setting helps to determine an optimal + size for hash table and improve performance when the number of + blocks in the key cache much less than the number of threads + accessing it; + to set this number equal to <N> add + #define MAX_THREADS <N> + - to substitute calls of pthread_cond_wait for calls of + pthread_cond_timedwait (wait with timeout set up); + this setting should be used only when you want to trap a deadlock + situation, which theoretically should not happen; + to set timeout equal to <T> seconds add + #define KEYCACHE_TIMEOUT <T> + - to enable the module traps and to send debug information from + key cache module to a special debug log add: + #define KEYCACHE_DEBUG + the name of this debug log file <LOG NAME> can be set through: + #define KEYCACHE_DEBUG_LOG <LOG NAME> + if the name is not defined, it's set by default; + if the KEYCACHE_DEBUG flag is not set up and we are in a debug + mode, i.e. when ! defined(DBUG_OFF), the debug information from the + module is sent to the regular debug log. 
+ + Example of the settings: + #define SERIALIZED_READ_FROM_CACHE + #define MAX_THREADS 100 + #define KEYCACHE_TIMEOUT 1 + #define KEYCACHE_DEBUG + #define KEYCACHE_DEBUG_LOG "my_key_cache_debug.log" */ #if defined(MSDOS) && !defined(M_IC80386) @@ -83,20 +83,24 @@ #define COND_FOR_SAVED 1 #define COND_FOR_READERS 2 -typedef pthread_cond_t KEYCACHE_CONDVAR; -typedef struct st_keycache_wqueue -{ /* info about requests in a waiting queue */ +typedef pthread_cond_t KEYCACHE_CONDVAR; + +/* info about requests in a waiting queue */ +typedef struct st_keycache_wqueue +{ struct st_my_thread_var *last_thread; /* circular list of waiting threads */ } KEYCACHE_WQUEUE; +/* descriptor of the page in the key cache block buffer */ typedef struct st_keycache_page -{ /* descriptor of the page in the key cache block buffer */ +{ int file; /* file to which the page belongs to */ my_off_t filepos; /* position of the page in the file */ } KEYCACHE_PAGE; -typedef struct st_hash_link -{ /* element in the chain of a hash table bucket */ +/* element in the chain of a hash table bucket */ +typedef struct st_hash_link +{ struct st_hash_link *next, **prev; /* to connect links in the same bucket */ struct st_block_link *block; /* reference to the block for the page: */ File file; /* from such a file */ @@ -117,8 +121,9 @@ typedef struct st_hash_link #define PAGE_TO_BE_READ 1 #define PAGE_WAIT_TO_BE_READ 2 +/* key cache block */ typedef struct st_block_link -{ /* key cache block */ +{ struct st_block_link *next_used, **prev_used; /* to connect links in the LRU chain (ring) */ struct st_block_link @@ -143,40 +148,36 @@ static uint key_cache_shift; #define CHANGED_BLOCKS_HASH 128 /* must be power of 2 */ #define FLUSH_CACHE 2000 /* sort this many blocks at once */ -static KEYCACHE_WQUEUE +static KEYCACHE_WQUEUE waiting_for_hash_link; /* queue of requests waiting for a free hash link */ -static KEYCACHE_WQUEUE +static KEYCACHE_WQUEUE waiting_for_block; /* queue of requests waiting for a free block 
*/ -static HASH_LINK **_my_hash_root; /* arr. of entries into hash table buckets */ -static uint _my_hash_entries; /* max number of entries in the hash table */ -static HASH_LINK *_my_hash_link_root; /* memory for hash table links */ -static int _my_hash_links; /* max number of hash links */ -static int _my_hash_links_used; /* number of hash links currently used */ -static HASH_LINK *_my_free_hash_list; /* list of free hash links */ -static BLOCK_LINK *_my_block_root; /* memory for block links */ -static int _my_disk_blocks; /* max number of blocks in the cache */ -static byte HUGE_PTR *_my_block_mem; /* memory for block buffers */ -static BLOCK_LINK *_my_used_last; /* ptr to the last block of the LRU chain */ -ulong _my_blocks_used, /* number of currently used blocks */ - _my_blocks_changed; /* number of currently dirty blocks */ +static HASH_LINK **my_hash_root; /* arr. of entries into hash table buckets */ +static uint my_hash_entries; /* max number of entries in the hash table */ +static HASH_LINK *my_hash_link_root; /* memory for hash table links */ +static int my_hash_links; /* max number of hash links */ +static int my_hash_links_used; /* number of hash links currently used */ +static HASH_LINK *my_free_hash_list; /* list of free hash links */ +static BLOCK_LINK *my_block_root; /* memory for block links */ +static int my_disk_blocks; /* max number of blocks in the cache */ +static byte HUGE_PTR *my_block_mem; /* memory for block buffers */ +static BLOCK_LINK *my_used_last; /* ptr to the last block of the LRU chain */ +ulong my_blocks_used, /* number of currently used blocks */ + my_blocks_changed; /* number of currently dirty blocks */ #if defined(KEYCACHE_DEBUG) static -ulong _my_blocks_available; /* number of blocks available in the LRU chain */ +ulong my_blocks_available; /* number of blocks available in the LRU chain */ #endif /* defined(KEYCACHE_DEBUG) */ -ulong _my_cache_w_requests,_my_cache_write, /* counters */ - _my_cache_r_requests,_my_cache_read; 
/* for statistics */ -static BLOCK_LINK +ulong my_cache_w_requests, my_cache_write, /* counters */ + my_cache_r_requests, my_cache_read; /* for statistics */ +static BLOCK_LINK *changed_blocks[CHANGED_BLOCKS_HASH]; /* hash table for file dirty blocks */ static BLOCK_LINK *file_blocks[CHANGED_BLOCKS_HASH]; /* hash table for other file blocks */ /* that are not free */ -#ifndef DBUG_OFF -static my_bool _my_printed; -#endif - #define KEYCACHE_HASH(f, pos) \ - (((ulong) ((pos) >> key_cache_shift)+(ulong) (f)) & (_my_hash_entries-1)) + (((ulong) ((pos) >> key_cache_shift)+(ulong) (f)) & (my_hash_entries-1)) #define FILE_HASH(f) ((uint) (f) & (CHANGED_BLOCKS_HASH-1)) #define DEFAULT_KEYCACHE_DEBUG_LOG "keycache_debug.log" @@ -194,11 +195,11 @@ static void keycache_debug_print _VARARGS((const char *fmt,...)); #define KEYCACHE_DEBUG_CLOSE \ if (keycache_debug_log) fclose(keycache_debug_log) #else -#define KEYCACHE_DEBUG_OPEN +#define KEYCACHE_DEBUG_OPEN #define KEYCACHE_DEBUG_CLOSE #endif /* defined(KEYCACHE_DEBUG_LOG) */ -#if defined(KEYCACHE_DEBUG_LOG) && defined(KEYCACHE_DEBUG) +#if defined(KEYCACHE_DEBUG_LOG) && defined(KEYCACHE_DEBUG) #define KEYCACHE_DBUG_PRINT(l, m) \ { if (keycache_debug_log) fprintf(keycache_debug_log, "%s: ", l); \ keycache_debug_print m; } @@ -224,15 +225,15 @@ static long keycache_thread_id; #define KEYCACHE_THREAD_TRACE_END(l) \ KEYCACHE_DBUG_PRINT(l,("]thread %ld",keycache_thread_id)) #else -#define KEYCACHE_THREAD_TRACE_BEGIN(l) -#define KEYCACHE_THREAD_TRACE_END(l) -#define KEYCACHE_THREAD_TRACE(l) +#define KEYCACHE_THREAD_TRACE_BEGIN(l) +#define KEYCACHE_THREAD_TRACE_END(l) +#define KEYCACHE_THREAD_TRACE(l) #endif /* defined(KEYCACHE_DEBUG) || !defined(DBUG_OFF) */ #define BLOCK_NUMBER(b) \ - ((uint) (((char*)(b) - (char *) _my_block_root) / sizeof(BLOCK_LINK))) + ((uint) (((char*)(b) - (char *) my_block_root) / sizeof(BLOCK_LINK))) #define HASH_LINK_NUMBER(h) \ - ((uint) (((char*)(h) - (char *) _my_hash_link_root) / sizeof(HASH_LINK))) + 
((uint) (((char*)(h) - (char *) my_hash_link_root) / sizeof(HASH_LINK))) #if (defined(KEYCACHE_TIMEOUT) && !defined(__WIN__)) || defined(KEYCACHE_DEBUG) static int keycache_pthread_cond_wait(pthread_cond_t *cond, @@ -266,18 +267,19 @@ static uint next_power(uint value) /* - Initialize the key cache, - return number of blocks in it + Initialize the key cache, + return number of blocks in it */ + int init_key_cache(ulong use_mem) { uint blocks, hash_links, length; int error; - + DBUG_ENTER("init_key_cache"); - + KEYCACHE_DEBUG_OPEN; - if (key_cache_inited && _my_disk_blocks > 0) + if (key_cache_inited && my_disk_blocks > 0) { DBUG_PRINT("warning",("key cache already in use")); DBUG_RETURN(0); @@ -285,101 +287,101 @@ int init_key_cache(ulong use_mem) if (! key_cache_inited) { key_cache_inited=TRUE; - _my_disk_blocks= -1; + my_disk_blocks= -1; key_cache_shift=my_bit_log2(key_cache_block_size); - DBUG_PRINT("info",("key_cache_block_size: %u", + DBUG_PRINT("info",("key_cache_block_size: %u", key_cache_block_size)); -#ifndef DBUG_OFF - _my_printed=0; -#endif } - - _my_cache_w_requests=_my_cache_r_requests=_my_cache_read=_my_cache_write=0; - - _my_block_mem=NULL; - _my_block_root=NULL; - + + my_cache_w_requests= my_cache_r_requests= my_cache_read= my_cache_write=0; + + my_block_mem=NULL; + my_block_root=NULL; + blocks= (uint) (use_mem/(sizeof(BLOCK_LINK)+2*sizeof(HASH_LINK)+ sizeof(HASH_LINK*)*5/4+key_cache_block_size)); /* It doesn't make sense to have too few blocks (less than 8) */ - if (blocks >= 8 && _my_disk_blocks < 0) + if (blocks >= 8 && my_disk_blocks < 0) { for (;;) { - /* Set _my_hash_entries to the next bigger 2 power */ - if ((_my_hash_entries=next_power(blocks)) < blocks*5/4) - _my_hash_entries<<=1; + /* Set my_hash_entries to the next bigger 2 power */ + if ((my_hash_entries=next_power(blocks)) < blocks*5/4) + my_hash_entries<<=1; hash_links=2*blocks; #if defined(MAX_THREADS) if (hash_links < MAX_THREADS + blocks - 1) hash_links=MAX_THREADS + blocks - 1; 
#endif - while ((length=blocks*sizeof(BLOCK_LINK)+hash_links*sizeof(HASH_LINK)+ - sizeof(HASH_LINK*)*_my_hash_entries)+ - ((ulong) blocks << key_cache_shift) > - use_mem) + while ((length=(ALIGN_SIZE(blocks*sizeof(BLOCK_LINK))+ + ALIGN_SIZE(hash_links*sizeof(HASH_LINK))+ + ALIGN_SIZE(sizeof(HASH_LINK*)*my_hash_entries)))+ + ((ulong) blocks << key_cache_shift) > use_mem) blocks--; /* Allocate memory for cache page buffers */ - if ((_my_block_mem=my_malloc_lock((ulong) blocks*key_cache_block_size, - MYF(0)))) + if ((my_block_mem=my_malloc_lock((ulong) blocks*key_cache_block_size, + MYF(0)))) { - /* + /* Allocate memory for blocks, hash_links and hash entries; - For each block 2 hash links are allocated + For each block 2 hash links are allocated */ - if ((_my_block_root=(BLOCK_LINK*) my_malloc((uint) length,MYF(0)))) + if ((my_block_root=(BLOCK_LINK*) my_malloc((uint) length,MYF(0)))) break; - my_free_lock(_my_block_mem,MYF(0)); + my_free_lock(my_block_mem,MYF(0)); } - if (blocks < 8) + if (blocks < 8) { my_errno=ENOMEM; goto err; } blocks=blocks/4*3; } - _my_disk_blocks=(int) blocks; - _my_hash_links=hash_links; - _my_hash_root=(HASH_LINK**) (_my_block_root+blocks); - _my_hash_link_root=(HASH_LINK*) (_my_hash_root+_my_hash_entries); - bzero((byte*) _my_block_root,_my_disk_blocks*sizeof(BLOCK_LINK)); - bzero((byte*) _my_hash_root,_my_hash_entries*sizeof(HASH_LINK*)); - bzero((byte*) _my_hash_link_root,_my_hash_links*sizeof(HASH_LINK)); - _my_hash_links_used=0; - _my_free_hash_list=NULL; - _my_blocks_used=_my_blocks_changed=0; + my_disk_blocks=(int) blocks; + my_hash_links=hash_links; + my_hash_root= (HASH_LINK**) ((char*) my_block_root + + ALIGN_SIZE(blocks*sizeof(BLOCK_LINK))); + my_hash_link_root= (HASH_LINK*) ((char*) my_hash_root + + ALIGN_SIZE((sizeof(HASH_LINK*) * + my_hash_entries))); + bzero((byte*) my_block_root, my_disk_blocks*sizeof(BLOCK_LINK)); + bzero((byte*) my_hash_root, my_hash_entries*sizeof(HASH_LINK*)); + bzero((byte*) my_hash_link_root, 
my_hash_links*sizeof(HASH_LINK)); + my_hash_links_used=0; + my_free_hash_list=NULL; + my_blocks_used= my_blocks_changed=0; #if defined(KEYCACHE_DEBUG) - _my_blocks_available=0; + my_blocks_available=0; #endif /* The LRU chain is empty after initialization */ - _my_used_last=NULL; - + my_used_last=NULL; + waiting_for_hash_link.last_thread=NULL; waiting_for_block.last_thread=NULL; DBUG_PRINT("exit", ("disk_blocks: %d block_root: %lx hash_entries: %d hash_root: %lx \ hash_links: %d hash_link_root %lx", - _my_disk_blocks,_my_block_root,_my_hash_entries,_my_hash_root, - _my_hash_links,_my_hash_link_root)); + my_disk_blocks, my_block_root, my_hash_entries, my_hash_root, + my_hash_links, my_hash_link_root)); } bzero((gptr) changed_blocks,sizeof(changed_blocks[0])*CHANGED_BLOCKS_HASH); bzero((gptr) file_blocks,sizeof(file_blocks[0])*CHANGED_BLOCKS_HASH); - + DBUG_RETURN((int) blocks); - + err: error=my_errno; - if (_my_block_mem) - my_free_lock((gptr) _my_block_mem,MYF(0)); - if (_my_block_mem) - my_free((gptr) _my_block_root,MYF(0)); + if (my_block_mem) + my_free_lock((gptr) my_block_mem,MYF(0)); + if (my_block_mem) + my_free((gptr) my_block_root,MYF(0)); my_errno=error; DBUG_RETURN(0); } /* - Resize the key cache + Resize the key cache */ int resize_key_cache(ulong use_mem) { @@ -394,51 +396,54 @@ int resize_key_cache(ulong use_mem) end_key_cache(); /* the following will work even if memory is 0 */ blocks=init_key_cache(use_mem); - keycache_pthread_mutex_unlock(&THR_LOCK_keycache); + keycache_pthread_mutex_unlock(&THR_LOCK_keycache); return blocks; } /* - Remove key_cache from memory + Remove key_cache from memory */ + void end_key_cache(void) { DBUG_ENTER("end_key_cache"); - if (_my_disk_blocks > 0) + if (my_disk_blocks > 0) { - if (_my_block_mem) + if (my_block_mem) { - my_free_lock((gptr) _my_block_mem,MYF(0)); - my_free((gptr) _my_block_root,MYF(0)); + my_free_lock((gptr) my_block_mem,MYF(0)); + my_free((gptr) my_block_root,MYF(0)); } - _my_disk_blocks= -1; + 
my_disk_blocks= -1; } KEYCACHE_DEBUG_CLOSE; key_cache_inited=0; DBUG_PRINT("status", ("used: %d changed: %d w_requests: %ld \ writes: %ld r_requests: %ld reads: %ld", - _my_blocks_used,_my_blocks_changed,_my_cache_w_requests, - _my_cache_write,_my_cache_r_requests,_my_cache_read)); + my_blocks_used, my_blocks_changed, my_cache_w_requests, + my_cache_write, my_cache_r_requests, my_cache_read)); DBUG_VOID_RETURN; } /* end_key_cache */ /* - Link a thread into double-linked queue of waiting threads + Link a thread into double-linked queue of waiting threads */ + static inline void link_into_queue(KEYCACHE_WQUEUE *wqueue, struct st_my_thread_var *thread) -{ +{ struct st_my_thread_var *last; if (! (last=wqueue->last_thread)) - { /* Queue is empty */ + { + /* Queue is empty */ thread->next=thread; thread->prev=&thread->next; } else - { + { thread->prev=last->next->prev; last->next->prev=&thread->next; thread->next=last->next; @@ -448,16 +453,17 @@ static inline void link_into_queue(KEYCACHE_WQUEUE *wqueue, } /* - Unlink a thread from double-linked queue of waiting threads + Unlink a thread from double-linked queue of waiting threads */ + static inline void unlink_from_queue(KEYCACHE_WQUEUE *wqueue, struct st_my_thread_var *thread) -{ +{ KEYCACHE_DBUG_PRINT("unlink_from_queue", ("thread %ld", thread->id)); if (thread->next == thread) /* The queue contains only one member */ wqueue->last_thread=NULL; - else + else { thread->next->prev=thread->prev; *thread->prev=thread->next; @@ -470,16 +476,17 @@ static inline void unlink_from_queue(KEYCACHE_WQUEUE *wqueue, /* - Add a thread to single-linked queue of waiting threads + Add a thread to single-linked queue of waiting threads */ + static inline void add_to_queue(KEYCACHE_WQUEUE *wqueue, struct st_my_thread_var *thread) -{ +{ struct st_my_thread_var *last; if (! 
(last=wqueue->last_thread)) thread->next=thread; else - { + { thread->next=last->next; last->next=thread; } @@ -488,10 +495,11 @@ static inline void add_to_queue(KEYCACHE_WQUEUE *wqueue, /* - Remove all threads from queue signaling them to proceed + Remove all threads from queue signaling them to proceed */ -static inline void release_queue(KEYCACHE_WQUEUE *wqueue) -{ + +static void release_queue(KEYCACHE_WQUEUE *wqueue) +{ struct st_my_thread_var *last=wqueue->last_thread; struct st_my_thread_var *next=last->next; struct st_my_thread_var *thread; @@ -509,8 +517,9 @@ static inline void release_queue(KEYCACHE_WQUEUE *wqueue) /* - Unlink a block from the chain of dirty/clean blocks + Unlink a block from the chain of dirty/clean blocks */ + static inline void unlink_changed(BLOCK_LINK *block) { if (block->next_changed) @@ -520,8 +529,9 @@ static inline void unlink_changed(BLOCK_LINK *block) /* - Link a block into the chain of dirty/clean blocks + Link a block into the chain of dirty/clean blocks */ + static inline void link_changed(BLOCK_LINK *block, BLOCK_LINK **phead) { block->prev_changed=phead; @@ -532,11 +542,12 @@ static inline void link_changed(BLOCK_LINK *block, BLOCK_LINK **phead) /* - Unlink a block from the chain of dirty/clean blocks, if it's asked for, - and link it to the chain of clean blocks for the specified file + Unlink a block from the chain of dirty/clean blocks, if it's asked for, + and link it to the chain of clean blocks for the specified file */ -static inline void link_to_file_list(BLOCK_LINK *block,int file, - my_bool unlink) + +static void link_to_file_list(BLOCK_LINK *block,int file, + my_bool unlink) { if (unlink) unlink_changed(block); @@ -544,27 +555,29 @@ static inline void link_to_file_list(BLOCK_LINK *block,int file, if (block->status & BLOCK_CHANGED) { block->status&=~BLOCK_CHANGED; - _my_blocks_changed--; + my_blocks_changed--; } } -/* - Unlink a block from the chain of clean blocks for the specified - file and link it to the chain 
of dirty blocks for this file +/* + Unlink a block from the chain of clean blocks for the specified + file and link it to the chain of dirty blocks for this file */ + static inline void link_to_changed_list(BLOCK_LINK *block) { unlink_changed(block); link_changed(block,&changed_blocks[FILE_HASH(block->hash_link->file)]); block->status|=BLOCK_CHANGED; - _my_blocks_changed++; + my_blocks_changed++; } /* - Link a block to the LRU chain at the beginning or at the end + Link a block to the LRU chain at the beginning or at the end */ + static void link_block(BLOCK_LINK *block, my_bool at_end) { KEYCACHE_DBUG_ASSERT(! (block->hash_link && block->hash_link->requests)); @@ -579,7 +592,7 @@ static void link_block(BLOCK_LINK *block, my_bool at_end) { thread=next_thread; next_thread=thread->next; - /* + /* We notify about the event all threads that ask for the same page as the first thread in the queue */ @@ -589,78 +602,79 @@ static void link_block(BLOCK_LINK *block, my_bool at_end) unlink_from_queue(&waiting_for_block, thread); block->requests++; } - } + } while (thread != last_thread); hash_link->block=block; KEYCACHE_THREAD_TRACE("link_block: after signaling"); #if defined(KEYCACHE_DEBUG) - KEYCACHE_DBUG_PRINT("link_block", + KEYCACHE_DBUG_PRINT("link_block", ("linked,unlinked block %u status=%x #requests=%u #available=%u", BLOCK_NUMBER(block),block->status, - block->requests,_my_blocks_available)); + block->requests, my_blocks_available)); #endif return; } - if (_my_used_last) + if (my_used_last) { - _my_used_last->next_used->prev_used=&block->next_used; - block->next_used=_my_used_last->next_used; - block->prev_used=&_my_used_last->next_used; - _my_used_last->next_used=block; + my_used_last->next_used->prev_used=&block->next_used; + block->next_used= my_used_last->next_used; + block->prev_used= &my_used_last->next_used; + my_used_last->next_used=block; if (at_end) - _my_used_last=block; + my_used_last=block; } else { /* The LRU chain is empty */ - 
_my_used_last=block->next_used=block; + my_used_last=block->next_used=block; block->prev_used=&block->next_used; } KEYCACHE_THREAD_TRACE("link_block"); #if defined(KEYCACHE_DEBUG) - _my_blocks_available++; - KEYCACHE_DBUG_PRINT("link_block", + my_blocks_available++; + KEYCACHE_DBUG_PRINT("link_block", ("linked block %u:%1u status=%x #requests=%u #available=%u", BLOCK_NUMBER(block),at_end,block->status, - block->requests,_my_blocks_available)); - KEYCACHE_DBUG_ASSERT(_my_blocks_available <= _my_blocks_used); + block->requests, my_blocks_available)); + KEYCACHE_DBUG_ASSERT(my_blocks_available <= my_blocks_used); #endif } /* - Unlink a block from the LRU chain + Unlink a block from the LRU chain */ -static inline void unlink_block(BLOCK_LINK *block) + +static void unlink_block(BLOCK_LINK *block) { if (block->next_used == block) /* The list contains only one member */ - _my_used_last=NULL; - else + my_used_last=NULL; + else { block->next_used->prev_used=block->prev_used; *block->prev_used=block->next_used; - if (_my_used_last == block) - _my_used_last=STRUCT_PTR(BLOCK_LINK, next_used, block->prev_used); + if (my_used_last == block) + my_used_last=STRUCT_PTR(BLOCK_LINK, next_used, block->prev_used); } block->next_used=NULL; - + KEYCACHE_THREAD_TRACE("unlink_block"); #if defined(KEYCACHE_DEBUG) - _my_blocks_available--; - KEYCACHE_DBUG_PRINT("unlink_block", + my_blocks_available--; + KEYCACHE_DBUG_PRINT("unlink_block", ("unlinked block %u status=%x #requests=%u #available=%u", BLOCK_NUMBER(block),block->status, - block->requests,_my_blocks_available)); - KEYCACHE_DBUG_ASSERT(_my_blocks_available >= 0); + block->requests, my_blocks_available)); + KEYCACHE_DBUG_ASSERT(my_blocks_available >= 0); #endif } /* - Register requests for a block + Register requests for a block */ -static inline void reg_requests(BLOCK_LINK *block, int count) +static void reg_requests(BLOCK_LINK *block, int count) { if (! 
block->requests) /* First request for the block unlinks it */ @@ -669,10 +683,11 @@ static inline void reg_requests(BLOCK_LINK *block, int count) } -/* - Unregister request for a block - linking it to the LRU chain if it's the last request +/* + Unregister request for a block + linking it to the LRU chain if it's the last request */ + static inline void unreg_request(BLOCK_LINK *block, int at_end) { if (! --block->requests) @@ -680,18 +695,19 @@ static inline void unreg_request(BLOCK_LINK *block, int at_end) } /* - Remove a reader of the page in block + Remove a reader of the page in block */ + static inline void remove_reader(BLOCK_LINK *block) -{ +{ if (! --block->hash_link->requests && block->condvar) keycache_pthread_cond_signal(block->condvar); } /* - Wait until the last reader of the page in block - signals on its termination + Wait until the last reader of the page in block + signals on its termination */ static inline void wait_for_readers(BLOCK_LINK *block) { @@ -706,8 +722,9 @@ static inline void wait_for_readers(BLOCK_LINK *block) /* - add a hash link to a bucket in the hash_table + Add a hash link to a bucket in the hash_table */ + static inline void link_hash(HASH_LINK **start, HASH_LINK *hash_link) { if (*start) @@ -719,9 +736,10 @@ static inline void link_hash(HASH_LINK **start, HASH_LINK *hash_link) /* - Remove a hash link from the hash table + Remove a hash link from the hash table */ -static inline void unlink_hash(HASH_LINK *hash_link) + +static void unlink_hash(HASH_LINK *hash_link) { KEYCACHE_DBUG_PRINT("unlink_hash", ("file %u, filepos %lu #requests=%u", (uint) hash_link->file,(ulong) hash_link->diskpos, hash_link->requests)); @@ -730,7 +748,8 @@ static inline void unlink_hash(HASH_LINK *hash_link) hash_link->next->prev=hash_link->prev; hash_link->block=NULL; if (waiting_for_hash_link.last_thread) - { /* Signal that A free hash link appeared */ + { + /* Signal that A free hash link appeared */ struct st_my_thread_var 
*last_thread=waiting_for_hash_link.last_thread; struct st_my_thread_var *first_thread=last_thread->next; struct st_my_thread_var *next_thread=first_thread; @@ -745,7 +764,7 @@ static inline void unlink_hash(HASH_LINK *hash_link) thread=next_thread; page= (KEYCACHE_PAGE *) thread->opt_info; next_thread=thread->next; - /* + /* We notify about the event all threads that ask for the same page as the first thread in the queue */ @@ -756,18 +775,20 @@ static inline void unlink_hash(HASH_LINK *hash_link) } } while (thread != last_thread); - link_hash(&_my_hash_root[KEYCACHE_HASH(hash_link->file, - hash_link->diskpos)], hash_link); + link_hash(&my_hash_root[KEYCACHE_HASH(hash_link->file, + hash_link->diskpos)], hash_link); return; - } - hash_link->next=_my_free_hash_list; - _my_free_hash_list=hash_link; + } + hash_link->next= my_free_hash_list; + my_free_hash_list=hash_link; } + /* - Get the hash link for a page + Get the hash link for a page */ -static inline HASH_LINK *get_hash_link(int file, my_off_t filepos) + +static HASH_LINK *get_hash_link(int file, my_off_t filepos) { reg1 HASH_LINK *hash_link, **start; KEYCACHE_PAGE page; @@ -784,7 +805,7 @@ restart: start contains the head of the bucket list, hash_link points to the first member of the list */ - hash_link=*(start=&_my_hash_root[KEYCACHE_HASH(file, filepos)]); + hash_link= *(start= &my_hash_root[KEYCACHE_HASH(file, filepos)]); #if defined(KEYCACHE_DEBUG) cnt=0; #endif @@ -795,7 +816,7 @@ restart: hash_link= hash_link->next; #if defined(KEYCACHE_DEBUG) cnt++; - if (! (cnt <= _my_hash_links_used)) + if (! (cnt <= my_hash_links_used)) { int i; for (i=0, hash_link=*start ; @@ -805,22 +826,24 @@ restart: (uint) hash_link->file,(ulong) hash_link->diskpos)); } } - KEYCACHE_DBUG_ASSERT(n <= _my_hash_links_used); + KEYCACHE_DBUG_ASSERT(n <= my_hash_links_used); #endif } if (! 
hash_link) - { /* There is no hash link in the hash table for the pair (file, filepos) */ - if (_my_free_hash_list) + { + /* There is no hash link in the hash table for the pair (file, filepos) */ + if (my_free_hash_list) { - hash_link=_my_free_hash_list; - _my_free_hash_list=hash_link->next; + hash_link= my_free_hash_list; + my_free_hash_list=hash_link->next; } - else if (_my_hash_links_used < _my_hash_links) + else if (my_hash_links_used < my_hash_links) { - hash_link= &_my_hash_link_root[_my_hash_links_used++]; + hash_link= &my_hash_link_root[my_hash_links_used++]; } else - { /* Wait for a free hash link */ + { + /* Wait for a free hash link */ struct st_my_thread_var *thread=my_thread_var; KEYCACHE_DBUG_PRINT("get_hash_link", ("waiting")); page.file=file; page.filepos=filepos; @@ -836,16 +859,17 @@ restart: } /* Register the request for the page */ hash_link->requests++; - + return hash_link; } /* - Get a block for the file page requested by a keycache read/write operation; - If the page is not in the cache return a free block, if there is none - return the lru block after saving its buffer if the page is dirty -*/ + Get a block for the file page requested by a keycache read/write operation; + If the page is not in the cache return a free block, if there is none + return the lru block after saving its buffer if the page is dirty +*/ + static BLOCK_LINK *find_key_block(int file, my_off_t filepos, int wrmode, int *page_st) { @@ -853,7 +877,7 @@ static BLOCK_LINK *find_key_block(int file, my_off_t filepos, BLOCK_LINK *block; int error=0; int page_status; - + DBUG_ENTER("find_key_block"); KEYCACHE_THREAD_TRACE("find_key_block:begin"); DBUG_PRINT("enter", ("file %u, filepos %lu, wrmode %lu", @@ -863,22 +887,22 @@ static BLOCK_LINK *find_key_block(int file, my_off_t filepos, #if !defined(DBUG_OFF) && defined(EXTRA_DEBUG) DBUG_EXECUTE("check_keycache2",test_key_cache("start of find_key_block",0);); #endif - + restart: /* Find the hash link for the requested page 
(file, filepos) */ hash_link=get_hash_link(file, filepos); - + page_status=-1; if ((block=hash_link->block) && block->hash_link == hash_link && (block->status & BLOCK_READ)) page_status=PAGE_READ; - - if (page_status == PAGE_READ && (block->status & BLOCK_IN_SWITCH)) - { /* This is a request for a page to be removed from cache */ - - KEYCACHE_DBUG_PRINT("find_key_block", + + if (page_status == PAGE_READ && (block->status & BLOCK_IN_SWITCH)) + { + /* This is a request for a page to be removed from cache */ + KEYCACHE_DBUG_PRINT("find_key_block", ("request for old page in block %u",BLOCK_NUMBER(block))); - /* + /* Only reading requests can proceed until the old dirty page is flushed, all others are to be suspended, then resubmitted */ @@ -887,7 +911,7 @@ restart: else { hash_link->requests--; - KEYCACHE_DBUG_PRINT("find_key_block", + KEYCACHE_DBUG_PRINT("find_key_block", ("request waiting for old page to be saved")); { struct st_my_thread_var *thread=my_thread_var; @@ -900,47 +924,51 @@ restart: } while(thread->next); } - KEYCACHE_DBUG_PRINT("find_key_block", + KEYCACHE_DBUG_PRINT("find_key_block", ("request for old page resubmitted")); /* Resubmit the request */ goto restart; } } else - { /* This is a request for a new page or for a page not to be removed */ + { + /* This is a request for a new page or for a page not to be removed */ if (! 
block) - { /* No block is assigned for the page yet */ - if (_my_blocks_used < (uint) _my_disk_blocks) - { /* There are some never used blocks, take first of them */ - hash_link->block=block= &_my_block_root[_my_blocks_used]; - block->buffer=ADD_TO_PTR(_my_block_mem, - ((ulong) _my_blocks_used*key_cache_block_size), + { + /* No block is assigned for the page yet */ + if (my_blocks_used < (uint) my_disk_blocks) + { + /* There are some never used blocks, take first of them */ + hash_link->block=block= &my_block_root[my_blocks_used]; + block->buffer=ADD_TO_PTR(my_block_mem, + ((ulong) my_blocks_used*key_cache_block_size), byte*); block->status=0; block->length=0; block->offset=key_cache_block_size; block->requests=1; - _my_blocks_used++; + my_blocks_used++; link_to_file_list(block, file, 0); block->hash_link=hash_link; page_status=PAGE_TO_BE_READ; - KEYCACHE_DBUG_PRINT("find_key_block", + KEYCACHE_DBUG_PRINT("find_key_block", ("got never used block %u",BLOCK_NUMBER(block))); } else - { /* There are no never used blocks, use a block from the LRU chain */ + { + /* There are no never used blocks, use a block from the LRU chain */ /* - Wait until a new block is added to the LRU chain; + Wait until a new block is added to the LRU chain; several threads might wait here for the same page, all of them must get the same block */ - - if (! _my_used_last) + + if (! my_used_last) { struct st_my_thread_var *thread=my_thread_var; thread->opt_info=(void *) hash_link; link_into_queue(&waiting_for_block, thread); - do + do { keycache_pthread_cond_wait(&thread->suspend,&THR_LOCK_keycache); } @@ -950,50 +978,53 @@ restart: block=hash_link->block; if (! block) { - /* - Take the first block from the LRU chain + /* + Take the first block from the LRU chain unlinking it from the chain */ - block=_my_used_last->next_used; + block= my_used_last->next_used; reg_requests(block,1); hash_link->block=block; } - - if (block->hash_link != hash_link && ! 
(block->status & BLOCK_IN_SWITCH) ) - { /* this is a primary request for a new page */ + + if (block->hash_link != hash_link && + ! (block->status & BLOCK_IN_SWITCH) ) + { + /* this is a primary request for a new page */ block->status|=BLOCK_IN_SWITCH; - - KEYCACHE_DBUG_PRINT("find_key_block", + + KEYCACHE_DBUG_PRINT("find_key_block", ("got block %u for new page",BLOCK_NUMBER(block))); - + if (block->status & BLOCK_CHANGED) - { /* The block contains a dirty page - push it out of the cache */ - + { + /* The block contains a dirty page - push it out of the cache */ + KEYCACHE_DBUG_PRINT("find_key_block",("block is dirty")); - + keycache_pthread_mutex_unlock(&THR_LOCK_keycache); - /* - The call is thread safe because only the current - thread might change the block->hash_link value + /* + The call is thread safe because only the current + thread might change the block->hash_link value */ error=my_pwrite(block->hash_link->file,block->buffer, block->length,block->hash_link->diskpos, - MYF(MY_NABP | MY_WAIT_IF_FULL)); + MYF(MY_NABP | MY_WAIT_IF_FULL)); keycache_pthread_mutex_lock(&THR_LOCK_keycache); - _my_cache_write++; + my_cache_write++; } - + block->status|=BLOCK_REASSIGNED; if (block->hash_link) { - /* - Wait until all pending read requests - for this page are executed - (we could have avoided this waiting, if we had read - a page in the cache in a sweep, without yielding control) + /* + Wait until all pending read requests + for this page are executed + (we could have avoided this waiting, if we had read + a page in the cache in a sweep, without yielding control) */ wait_for_readers(block); - + /* Remove the hash link for this page from the hash table */ unlink_hash(block->hash_link); /* All pending requests for this page must be resubmitted */ @@ -1006,7 +1037,7 @@ restart: block->offset=key_cache_block_size; block->hash_link=hash_link; page_status=PAGE_TO_BE_READ; - + KEYCACHE_DBUG_ASSERT(block->hash_link->block == block); 
KEYCACHE_DBUG_ASSERT(hash_link->block->hash_link == hash_link); } @@ -1018,8 +1049,8 @@ restart: PAGE_READ : PAGE_WAIT_TO_BE_READ; } } - - _my_cache_read++; + + my_cache_read++; } else { @@ -1029,10 +1060,10 @@ restart: PAGE_READ : PAGE_WAIT_TO_BE_READ; } } - + KEYCACHE_DBUG_ASSERT(page_status != -1); *page_st=page_status; - + #if !defined(DBUG_OFF) && defined(EXTRA_DEBUG) DBUG_EXECUTE("check_keycache2",test_key_cache("end of find_key_block",0);); #endif @@ -1042,55 +1073,58 @@ restart: /* - Read into a key cache block buffer from disk; - do not to report error when the size of successfully read - portion is less than read_length, but not less than min_length + Read into a key cache block buffer from disk; + do not to report error when the size of successfully read + portion is less than read_length, but not less than min_length */ + static void read_block(BLOCK_LINK *block, uint read_length, uint min_length, my_bool primary) { uint got_length; - + /* On entry THR_LOCK_keycache is locked */ - + KEYCACHE_THREAD_TRACE("read_block"); if (primary) - { /* - This code is executed only by threads - that submitted primary requests + { + /* + This code is executed only by threads + that submitted primary requests */ - - KEYCACHE_DBUG_PRINT("read_block", + + KEYCACHE_DBUG_PRINT("read_block", ("page to be read by primary request")); - + /* Page is not in buffer yet, is to be read from disk */ keycache_pthread_mutex_unlock(&THR_LOCK_keycache); got_length=my_pread(block->hash_link->file,block->buffer, read_length,block->hash_link->diskpos,MYF(0)); keycache_pthread_mutex_lock(&THR_LOCK_keycache); - if (got_length < min_length) + if (got_length < min_length) block->status|=BLOCK_ERROR; else { block->status=BLOCK_READ; block->length=got_length; } - KEYCACHE_DBUG_PRINT("read_block", + KEYCACHE_DBUG_PRINT("read_block", ("primary request: new page in cache")); /* Signal that all pending requests for this page now can be processed */ if (block->wqueue[COND_FOR_REQUESTED].last_thread) 
release_queue(&block->wqueue[COND_FOR_REQUESTED]); } - else - { /* - This code is executed only by threads - that submitted secondary requests + else + { + /* + This code is executed only by threads + that submitted secondary requests */ - KEYCACHE_DBUG_PRINT("read_block", + KEYCACHE_DBUG_PRINT("read_block", ("secondary request waiting for new page to be read")); { struct st_my_thread_var *thread=my_thread_var; - /* Put the request into a queue and wait until it can be processed */ + /* Put the request into a queue and wait until it can be processed */ add_to_queue(&block->wqueue[COND_FOR_REQUESTED],thread); do { @@ -1098,44 +1132,45 @@ static void read_block(BLOCK_LINK *block, uint read_length, } while (thread->next); } - KEYCACHE_DBUG_PRINT("read_block", + KEYCACHE_DBUG_PRINT("read_block", ("secondary request: new page in cache")); } } /* - Read a block of data from a cached file into a buffer; - if return_buffer is set then the cache buffer is returned if - it can be used; - filepos must be a multiple of 'block_length', but it doesn't - have to be a multiple of key_cache_block_size; - returns adress from where data is read + Read a block of data from a cached file into a buffer; + if return_buffer is set then the cache buffer is returned if + it can be used; + filepos must be a multiple of 'block_length', but it doesn't + have to be a multiple of key_cache_block_size; + returns adress from where data is read */ byte *key_cache_read(File file, my_off_t filepos, byte *buff, uint length, - uint block_length __attribute__((unused)), - int return_buffer __attribute__((unused))) + uint block_length __attribute__((unused)), + int return_buffer __attribute__((unused))) { int error=0; DBUG_ENTER("key_cache_read"); DBUG_PRINT("enter", ("file %u, filepos %lu, length %u", (uint) file,(ulong) filepos,length)); - - if (_my_disk_blocks > 0) - { /* Key cache is used */ + + if (my_disk_blocks > 0) + { + /* Key cache is used */ reg1 BLOCK_LINK *block; uint offset= (uint) (filepos 
& (key_cache_block_size-1)); byte *start=buff; uint read_length; uint status; int page_st; - + #ifndef THREAD if (block_length > key_cache_block_size || offset) return_buffer=0; #endif - + /* Read data in key_cache_block_size increments */ filepos-= offset; do @@ -1144,7 +1179,7 @@ byte *key_cache_read(File file, my_off_t filepos, byte *buff, uint length, key_cache_block_size : length; KEYCACHE_DBUG_ASSERT(read_length > 0); keycache_pthread_mutex_lock(&THR_LOCK_keycache); - _my_cache_r_requests++; + my_cache_r_requests++; block=find_key_block(file,filepos,0,&page_st); if (page_st != PAGE_READ) { @@ -1154,26 +1189,26 @@ byte *key_cache_read(File file, my_off_t filepos, byte *buff, uint length, } else if (! (block->status & BLOCK_ERROR) && block->length < read_length + offset) - { - /* - Impossible if nothing goes wrong: - this could only happen if we are using a file with - small key blocks and are trying to read outside the file + { + /* + Impossible if nothing goes wrong: + this could only happen if we are using a file with + small key blocks and are trying to read outside the file */ my_errno=-1; block->status|=BLOCK_ERROR; } - + if (! ((status=block->status) & BLOCK_ERROR)) { #ifndef THREAD - if (! return_buffer) + if (! 
return_buffer) #endif { #if !defined(SERIALIZED_READ_FROM_CACHE) keycache_pthread_mutex_unlock(&THR_LOCK_keycache); #endif - + /* Copy data from the cache buffer */ if (!(read_length & 511)) bmove512(buff,block->buffer+offset,read_length); @@ -1185,35 +1220,35 @@ byte *key_cache_read(File file, my_off_t filepos, byte *buff, uint length, #endif } } - + remove_reader(block); - /* - Link the block into the LRU chain - if it's the last submitted request for the block + /* + Link the block into the LRU chain + if it's the last submitted request for the block */ unreg_request(block,1); - + keycache_pthread_mutex_unlock(&THR_LOCK_keycache); - + if (status & BLOCK_ERROR) DBUG_RETURN((byte *) 0); - + #ifndef THREAD if (return_buffer) return (block->buffer); #endif - + buff+=read_length; filepos+=read_length; offset=0; - + } while ((length-= read_length)); DBUG_RETURN(start); } - + /* Key cache is not used */ - statistic_increment(_my_cache_r_requests,&THR_LOCK_keycache); - statistic_increment(_my_cache_read,&THR_LOCK_keycache); + statistic_increment(my_cache_r_requests,&THR_LOCK_keycache); + statistic_increment(my_cache_read,&THR_LOCK_keycache); if (my_pread(file,(byte*) buff,length,filepos,MYF(MY_NABP))) error=1; DBUG_RETURN(error? 
(byte*) 0 : buff); @@ -1221,12 +1256,13 @@ byte *key_cache_read(File file, my_off_t filepos, byte *buff, uint length, /* - Write a buffer into disk; - filepos must be a multiple of 'block_length', but it doesn't - have to be a multiple of key cache block size; - if !dont_write then all dirty pages involved in writing should - have been flushed from key cache before the function starts + Write a buffer into disk; + filepos must be a multiple of 'block_length', but it doesn't + have to be a multiple of key cache block size; + if !dont_write then all dirty pages involved in writing should + have been flushed from key cache before the function starts */ + int key_cache_write(File file, my_off_t filepos, byte *buff, uint length, uint block_length __attribute__((unused)), int dont_write) @@ -1239,31 +1275,33 @@ int key_cache_write(File file, my_off_t filepos, byte *buff, uint length, (uint) file,(ulong) filepos,length,block_length)); if (!dont_write) - { /* Force writing from buff into disk */ - statistic_increment(_my_cache_write, &THR_LOCK_keycache); + { + /* Force writing from buff into disk */ + statistic_increment(my_cache_write, &THR_LOCK_keycache); if (my_pwrite(file,buff,length,filepos,MYF(MY_NABP | MY_WAIT_IF_FULL))) DBUG_RETURN(1); } - + #if !defined(DBUG_OFF) && defined(EXTRA_DEBUG) DBUG_EXECUTE("check_keycache",test_key_cache("start of key_cache_write",1);); #endif - - if (_my_disk_blocks > 0) - { /* Key cache is used */ + + if (my_disk_blocks > 0) + { + /* Key cache is used */ uint read_length; uint offset= (uint) (filepos & (key_cache_block_size-1)); int page_st; - + /* Write data in key_cache_block_size increments */ filepos-= offset; do { read_length= length > key_cache_block_size ? 
- key_cache_block_size : length; + key_cache_block_size : length; KEYCACHE_DBUG_ASSERT(read_length > 0); keycache_pthread_mutex_lock(&THR_LOCK_keycache); - _my_cache_w_requests++; + my_cache_w_requests++; block=find_key_block(file, filepos, 1, &page_st); if (page_st != PAGE_READ && (offset || read_length < key_cache_block_size)) @@ -1271,19 +1309,20 @@ int key_cache_write(File file, my_off_t filepos, byte *buff, uint length, offset + read_length >= key_cache_block_size? offset : key_cache_block_size, offset,(my_bool)(page_st == PAGE_TO_BE_READ)); - + if (!dont_write) - { /* buff has been written to disk at start */ - if ((block->status & BLOCK_CHANGED) && + { + /* buff has been written to disk at start */ + if ((block->status & BLOCK_CHANGED) && (!offset && read_length >= key_cache_block_size)) link_to_file_list(block, block->hash_link->file, 1); } else if (! (block->status & BLOCK_CHANGED)) link_to_changed_list(block); - + set_if_smaller(block->offset,offset) set_if_bigger(block->length,read_length+offset); - + if (! 
(block->status & BLOCK_ERROR)) { if (!(read_length & 511)) @@ -1291,26 +1330,26 @@ int key_cache_write(File file, my_off_t filepos, byte *buff, uint length, else memcpy(block->buffer+offset,buff,(size_t) read_length); } - - block->status|=BLOCK_READ; - + + block->status|=BLOCK_READ; + /* Unregister the request */ block->hash_link->requests--; unreg_request(block,1); - + if (block->status & BLOCK_ERROR) { keycache_pthread_mutex_unlock(&THR_LOCK_keycache); error=1; break; } - + keycache_pthread_mutex_unlock(&THR_LOCK_keycache); - + buff+=read_length; filepos+=read_length; offset=0; - + } while ((length-= read_length)); } else @@ -1318,8 +1357,8 @@ int key_cache_write(File file, my_off_t filepos, byte *buff, uint length, /* Key cache is not used */ if (dont_write) { - statistic_increment(_my_cache_w_requests, &THR_LOCK_keycache); - statistic_increment(_my_cache_write, &THR_LOCK_keycache); + statistic_increment(my_cache_w_requests, &THR_LOCK_keycache); + statistic_increment(my_cache_write, &THR_LOCK_keycache); if (my_pwrite(file,(byte*) buff,length,filepos,MYF(MY_NABP | MY_WAIT_IF_FULL))) error=1; } @@ -1332,32 +1371,33 @@ int key_cache_write(File file, my_off_t filepos, byte *buff, uint length, } -/* - Free block: remove reference to it from hash table, - remove it from the chain file of dirty/clean blocks - and add it at the beginning of the LRU chain -*/ +/* + Free block: remove reference to it from hash table, + remove it from the chain file of dirty/clean blocks + and add it at the beginning of the LRU chain +*/ + static void free_block(BLOCK_LINK *block) { KEYCACHE_THREAD_TRACE("free block"); - KEYCACHE_DBUG_PRINT("free_block", - ("block %u to be freed",BLOCK_NUMBER(block))); - if (block->hash_link) + KEYCACHE_DBUG_PRINT("free_block", + ("block %u to be freed",BLOCK_NUMBER(block))); + if (block->hash_link) { block->status|=BLOCK_REASSIGNED; wait_for_readers(block); unlink_hash(block->hash_link); } - + unlink_changed(block); block->status=0; block->length=0; 
block->offset=key_cache_block_size; KEYCACHE_THREAD_TRACE("free block"); - KEYCACHE_DBUG_PRINT("free_block", + KEYCACHE_DBUG_PRINT("free_block", ("block is freed")); unreg_request(block,0); - block->hash_link=NULL; + block->hash_link=NULL; } @@ -1368,10 +1408,11 @@ static int cmp_sec_link(BLOCK_LINK **a, BLOCK_LINK **b) } -/* - Flush a portion of changed blocks to disk, - free used blocks if requested +/* + Flush a portion of changed blocks to disk, + free used blocks if requested */ + static int flush_cached_blocks(File file, BLOCK_LINK **cache, BLOCK_LINK **end, enum flush_type type) @@ -1379,27 +1420,27 @@ static int flush_cached_blocks(File file, BLOCK_LINK **cache, int error; int last_errno=0; uint count=end-cache; - + /* Don't lock the cache during the flush */ keycache_pthread_mutex_unlock(&THR_LOCK_keycache); - /* - As all blocks referred in 'cache' are marked by BLOCK_IN_FLUSH - we are guarunteed no thread will change them + /* + As all blocks referred in 'cache' are marked by BLOCK_IN_FLUSH + we are guarunteed no thread will change them */ qsort((byte*) cache,count,sizeof(*cache),(qsort_cmp) cmp_sec_link); - + keycache_pthread_mutex_lock(&THR_LOCK_keycache); for ( ; cache != end ; cache++) { BLOCK_LINK *block= *cache; - - KEYCACHE_DBUG_PRINT("flush_cached_blocks", + + KEYCACHE_DBUG_PRINT("flush_cached_blocks", ("block %u to be flushed", BLOCK_NUMBER(block))); keycache_pthread_mutex_unlock(&THR_LOCK_keycache); error=my_pwrite(file,block->buffer+block->offset,block->length, block->hash_link->diskpos,MYF(MY_NABP | MY_WAIT_IF_FULL)); keycache_pthread_mutex_lock(&THR_LOCK_keycache); - _my_cache_write++; + my_cache_write++; if (error) { block->status|= BLOCK_ERROR; @@ -1409,42 +1450,44 @@ static int flush_cached_blocks(File file, BLOCK_LINK **cache, /* type will never be FLUSH_IGNORE_CHANGED here */ if (! 
(type == FLUSH_KEEP || type == FLUSH_FORCE_WRITE)) { - _my_blocks_changed--; + my_blocks_changed--; free_block(block); } - else + else { block->status&=~BLOCK_IN_FLUSH; link_to_file_list(block,file,1); unreg_request(block,1); } - + } return last_errno; } /* - Flush all blocks for a file to disk + Flush all blocks for a file to disk */ + int flush_key_blocks(File file, enum flush_type type) { int last_errno=0; BLOCK_LINK *cache_buff[FLUSH_CACHE],**cache; DBUG_ENTER("flush_key_blocks"); DBUG_PRINT("enter",("file: %d blocks_used: %d blocks_changed: %d", - file,_my_blocks_used,_my_blocks_changed)); - + file, my_blocks_used, my_blocks_changed)); + #if !defined(DBUG_OFF) && defined(EXTRA_DEBUG) DBUG_EXECUTE("check_keycache",test_key_cache("start of flush_key_blocks",0);); #endif - + keycache_pthread_mutex_lock(&THR_LOCK_keycache); - - cache=cache_buff; - if (_my_disk_blocks > 0 && + + cache=cache_buff; + if (my_disk_blocks > 0 && (!my_disable_flush_key_blocks || type != FLUSH_KEEP)) - { /* Key cache exists and flush is not disabled */ + { + /* Key cache exists and flush is not disabled */ int error=0; uint count=0; BLOCK_LINK **pos,**end; @@ -1453,10 +1496,10 @@ int flush_key_blocks(File file, enum flush_type type) #if defined(KEYCACHE_DEBUG) uint cnt=0; #endif - + if (type != FLUSH_IGNORE_CHANGED) { - /* + /* Count how many key blocks we have to cache to be able to flush all dirty pages with minimum seek moves */ @@ -1467,18 +1510,18 @@ int flush_key_blocks(File file, enum flush_type type) if (block->hash_link->file == file) { count++; - KEYCACHE_DBUG_ASSERT(count<=_my_blocks_used); + KEYCACHE_DBUG_ASSERT(count<= my_blocks_used); } } /* Allocate a new buffer only if its bigger than the one we have */ - if (count > FLUSH_CACHE && + if (count > FLUSH_CACHE && !(cache=(BLOCK_LINK**) my_malloc(sizeof(BLOCK_LINK*)*count,MYF(0)))) { - cache=cache_buff; + cache=cache_buff; count=FLUSH_CACHE; } } - + /* Retrieve the blocks and write them to a buffer to be flushed */ restart: 
end=(pos=cache)+count; @@ -1488,37 +1531,40 @@ restart: { #if defined(KEYCACHE_DEBUG) cnt++; - KEYCACHE_DBUG_ASSERT(cnt <= _my_blocks_used); + KEYCACHE_DBUG_ASSERT(cnt <= my_blocks_used); #endif next=block->next_changed; if (block->hash_link->file == file) { - /* + /* Mark the block with BLOCK_IN_FLUSH in order not to let other threads to use it for new pages and interfere with our sequence ot flushing dirty file pages */ block->status|= BLOCK_IN_FLUSH; - + if (! (block->status & BLOCK_IN_SWITCH)) - { /* - We care only for the blocks for which flushing was not - initiated by other threads as a result of page swapping - */ + { + /* + We care only for the blocks for which flushing was not + initiated by other threads as a result of page swapping + */ reg_requests(block,1); - if (type != FLUSH_IGNORE_CHANGED) - { /* It's not a temporary file */ + if (type != FLUSH_IGNORE_CHANGED) + { + /* It's not a temporary file */ if (pos == end) - { /* - This happens only if there is not enough - memory for the big block + { + /* + This happens only if there is not enough + memory for the big block */ if ((error=flush_cached_blocks(file,cache,end,type))) - last_errno=error; - /* - Restart the scan as some other thread might have changed - the changed blocks chain: the blocks that were in switch - state before the flush started have to be excluded + last_errno=error; + /* + Restart the scan as some other thread might have changed + the changed blocks chain: the blocks that were in switch + state before the flush started have to be excluded */ goto restart; } @@ -1527,12 +1573,13 @@ restart: else { /* It's a temporary file */ - _my_blocks_changed--; + my_blocks_changed--; free_block(block); } } else - { /* Link the block into a list of blocks 'in switch' */ + { + /* Link the block into a list of blocks 'in switch' */ unlink_changed(block); link_changed(block,&first_in_switch); } @@ -1561,7 +1608,7 @@ restart: } #if defined(KEYCACHE_DEBUG) cnt++; - KEYCACHE_DBUG_ASSERT(cnt <= 
_my_blocks_used); + KEYCACHE_DBUG_ASSERT(cnt <= my_blocks_used); #endif } /* The following happens very seldom */ @@ -1576,7 +1623,7 @@ restart: { #if defined(KEYCACHE_DEBUG) cnt++; - KEYCACHE_DBUG_ASSERT(cnt <= _my_blocks_used); + KEYCACHE_DBUG_ASSERT(cnt <= my_blocks_used); #endif next=block->next_changed; if (block->hash_link->file == file && @@ -1589,9 +1636,9 @@ restart: } } } - + keycache_pthread_mutex_unlock(&THR_LOCK_keycache); - + #ifndef DBUG_OFF DBUG_EXECUTE("check_keycache", test_key_cache("end of flush_key_blocks",0);); @@ -1604,30 +1651,31 @@ restart: } -/* - Flush all blocks in the key cache to disk +/* + Flush all blocks in the key cache to disk */ + static int flush_all_key_blocks() { #if defined(KEYCACHE_DEBUG) uint cnt=0; #endif - while (_my_blocks_changed > 0) + while (my_blocks_changed > 0) { BLOCK_LINK *block; - for (block=_my_used_last->next_used ; ; block=block->next_used) + for (block= my_used_last->next_used ; ; block=block->next_used) { if (block->hash_link) { #if defined(KEYCACHE_DEBUG) cnt++; - KEYCACHE_DBUG_ASSERT(cnt <= _my_blocks_used); + KEYCACHE_DBUG_ASSERT(cnt <= my_blocks_used); #endif if (flush_key_blocks(block->hash_link->file, FLUSH_RELEASE)) return 1; break; } - if (block == _my_used_last) + if (block == my_used_last) break; } } @@ -1637,13 +1685,13 @@ static int flush_all_key_blocks() #ifndef DBUG_OFF /* - Test if disk-cache is ok + Test if disk-cache is ok */ -static void test_key_cache(const char *where __attribute__((unused)), +static void test_key_cache(const char *where __attribute__((unused)), my_bool lock __attribute__((unused))) { /* TODO */ -} +} #endif #if defined(KEYCACHE_TIMEOUT) @@ -1680,7 +1728,7 @@ static void keycache_dump() break; } while (thread != last); - + i=0; thread=last=waiting_for_block.last_thread; fprintf(keycache_dump_file, "queue of threads waiting for block\n"); @@ -1698,11 +1746,11 @@ static void keycache_dump() } while (thread != last); - for (i=0 ; i< _my_blocks_used ; i++) + for (i=0 ; i< 
my_blocks_used ; i++) { int j; - block=&_my_block_root[i]; - hash_link=block->hash_link; + block= &my_block_root[i]; + hash_link= block->hash_link; fprintf(keycache_dump_file, "block:%u hash_link:%d status:%x #requests=%u waiting_for_readers:%d\n", i, (int) (hash_link ? HASH_LINK_NUMBER(hash_link) : -1), @@ -1713,6 +1761,7 @@ static void keycache_dump() thread=last=wqueue->last_thread; fprintf(keycache_dump_file, "queue #%d\n", j); if (thread) + { do { thread=thread->next; @@ -1722,20 +1771,23 @@ static void keycache_dump() break; } while (thread != last); + } } } fprintf(keycache_dump_file, "LRU chain:"); - block=_my_used_last; + block= my_used_last; if (block) + { do { block=block->next_used; fprintf(keycache_dump_file, "block:%u, ", BLOCK_NUMBER(block)); } - while (block != _my_used_last); + while (block != my_used_last); + } fprintf(keycache_dump_file, "\n"); - + fclose(keycache_dump_file); } @@ -1744,7 +1796,7 @@ static void keycache_dump() #if defined(KEYCACHE_TIMEOUT) && !defined(__WIN__) -static int keycache_pthread_cond_wait(pthread_cond_t *cond, +static int keycache_pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex) { int rc; @@ -1754,8 +1806,8 @@ static int keycache_pthread_cond_wait(pthread_cond_t *cond, #if defined(KEYCACHE_DEBUG) int cnt=0; #endif - - /* Get current time */ + + /* Get current time */ gettimeofday(&now, &tz); /* Prepare timeout value */ timeout.tv_sec = now.tv_sec + KEYCACHE_TIMEOUT; @@ -1773,16 +1825,16 @@ static int keycache_pthread_cond_wait(pthread_cond_t *cond, KEYCACHE_THREAD_TRACE_BEGIN("finished waiting"); #if defined(KEYCACHE_DEBUG) if (rc == ETIMEDOUT) - { + { fprintf(keycache_debug_log,"aborted by keycache timeout\n"); fclose(keycache_debug_log); abort(); } #endif - + if (rc == ETIMEDOUT) keycache_dump(); - + #if defined(KEYCACHE_DEBUG) KEYCACHE_DBUG_ASSERT(rc != ETIMEDOUT); #else @@ -1867,4 +1919,3 @@ void keycache_debug_log_close(void) #endif /* defined(KEYCACHE_DEBUG_LOG) */ #endif /* defined(KEYCACHE_DEBUG) 
*/ - |