Diffstat (limited to 'mysys/mf_keycache.c')
-rw-r--r--  mysys/mf_keycache.c  31
1 file changed, 17 insertions, 14 deletions
diff --git a/mysys/mf_keycache.c b/mysys/mf_keycache.c
index a03d71f32d8..7cbd35580f1 100644
--- a/mysys/mf_keycache.c
+++ b/mysys/mf_keycache.c
@@ -102,9 +102,11 @@
*/
#include "mysys_priv.h"
+#include "mysys_err.h"
#include <keycache.h>
#include "my_static.h"
#include <m_string.h>
+#include <my_bit.h>
#include <errno.h>
#include <stdarg.h>
@@ -430,7 +432,7 @@ int init_key_cache(KEY_CACHE *keycache, uint key_cache_block_size,
/* Allocate memory for cache page buffers */
if ((keycache->block_mem=
my_large_malloc((size_t) blocks * keycache->key_cache_block_size,
- MYF(MY_WME))))
+ MYF(0))))
{
/*
Allocate memory for blocks, hash_links and hash entries;
@@ -445,6 +447,7 @@ int init_key_cache(KEY_CACHE *keycache, uint key_cache_block_size,
if (blocks < 8)
{
my_errno= ENOMEM;
+ my_error(EE_OUTOFMEMORY, MYF(0), blocks * keycache->key_cache_block_size);
goto err;
}
blocks= blocks / 4*3;
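
Note on the two hunks above: dropping MY_WME from the my_large_malloc() call makes each failed allocation attempt silent, and the new my_error(EE_OUTOFMEMORY, ...) call reports the failure only once the shrinking block count falls below the supported minimum. A minimal standalone sketch of that caller-reports-the-error pattern, using illustrative names (alloc_cache, init_cache) rather than the real mysys API:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative sketch only: allocation attempts stay silent (like
   MYF(0)); the error is reported once by the caller, which knows the
   minimum it can live with. */
static void *alloc_cache(size_t bytes)
{
  return malloc(bytes);                     /* no warning on failure */
}

static int init_cache(size_t block_size, size_t blocks)
{
  void *mem;
  while (!(mem= alloc_cache(blocks * block_size)))
  {
    if (blocks < 8)                         /* below the usable minimum */
    {
      errno= ENOMEM;
      fprintf(stderr, "Out of memory (needed %zu bytes)\n",
              blocks * block_size);
      return -1;                            /* report once, at this level */
    }
    blocks= blocks / 4 * 3;                 /* shrink and retry, as above */
  }
  free(mem);                                /* sketch only: keep nothing */
  return 0;
}
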
@@ -1262,12 +1265,12 @@ static void unlink_block(KEY_CACHE *keycache, BLOCK_LINK *block)
KEYCACHE_THREAD_TRACE("unlink_block");
#if defined(KEYCACHE_DEBUG)
+ KEYCACHE_DBUG_ASSERT(keycache->blocks_available != 0);
keycache->blocks_available--;
KEYCACHE_DBUG_PRINT("unlink_block",
("unlinked block %u status=%x #requests=%u #available=%u",
BLOCK_NUMBER(block), block->status,
block->requests, keycache->blocks_available));
- KEYCACHE_DBUG_ASSERT(keycache->blocks_available >= 0);
#endif
}
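
Note on the unlink_block() hunk above: blocks_available is an unsigned counter, so the old assertion ">= 0" could never fire, and it only ran after a possible wrap-around; the patch asserts "!= 0" before the decrement instead. The same pattern in isolation (the plain assert() and file-scope counter here are illustrative stand-ins, not the key cache's own macros):

#include <assert.h>

/* Stands in for keycache->blocks_available in this sketch. */
static unsigned int blocks_available;

static void unlink_one_block(void)
{
  /* An unsigned value is always >= 0, so checking that after the
     decrement proves nothing; test for underflow before it happens. */
  assert(blocks_available != 0);
  blocks_available--;
}
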
@@ -2360,9 +2363,9 @@ restart:
(block->hash_link->diskpos == filepos)));
*page_st=page_status;
KEYCACHE_DBUG_PRINT("find_key_block",
- ("fd: %d pos: %lu block->status: %u page_status: %u",
+ ("fd: %d pos: %lu block->status: %u page_status: %d",
file, (ulong) filepos, block->status,
- (uint) page_status));
+ page_status));
#if !defined(DBUG_OFF) && defined(EXTRA_DEBUG)
DBUG_EXECUTE("check_keycache2",
@@ -2513,10 +2516,10 @@ static void read_block(KEY_CACHE *keycache,
*/
uchar *key_cache_read(KEY_CACHE *keycache,
- File file, my_off_t filepos, int level,
- uchar *buff, uint length,
- uint block_length __attribute__((unused)),
- int return_buffer __attribute__((unused)))
+ File file, my_off_t filepos, int level,
+ uchar *buff, uint length,
+ uint block_length __attribute__((unused)),
+ int return_buffer __attribute__((unused)))
{
my_bool locked_and_incremented= FALSE;
int error=0;
@@ -2534,12 +2537,12 @@ uchar *key_cache_read(KEY_CACHE *keycache,
uint status;
int page_st;
- /*
+ /*
When the key cache is once initialized, we use the cache_lock to
reliably distinguish the cases of normal operation, resizing, and
disabled cache. We always increment and decrement
'cnt_for_resize_op' so that a resizer can wait for pending I/O.
- */
+ */
keycache_pthread_mutex_lock(&keycache->cache_lock);
/*
Cache resizing has two phases: Flushing and re-initializing. In
@@ -2976,9 +2979,10 @@ int key_cache_write(KEY_CACHE *keycache,
int error=0;
DBUG_ENTER("key_cache_write");
DBUG_PRINT("enter",
- ("fd: %u pos: %lu length: %u block_length: %u key_block_length: %u",
- (uint) file, (ulong) filepos, length, block_length,
- keycache ? keycache->key_cache_block_size : 0));
+ ("fd: %u pos: %lu length: %u block_length: %u"
+ " key_block_length: %u",
+ (uint) file, (ulong) filepos, length, block_length,
+ keycache ? keycache->key_cache_block_size : 0));
if (!dont_write)
{
@@ -3184,7 +3188,6 @@ int key_cache_write(KEY_CACHE *keycache,
a flush.
*/
block->status&= ~BLOCK_FOR_UPDATE;
-
set_if_smaller(block->offset, offset);
set_if_bigger(block->length, read_length+offset);