Diffstat (limited to 'mysys')
-rw-r--r--  mysys/lf_dynarray.c  92
-rw-r--r--  mysys/lf_hash.c      41
-rw-r--r--  mysys/my_atomic.c     2
3 files changed, 59 insertions, 76 deletions
diff --git a/mysys/lf_dynarray.c b/mysys/lf_dynarray.c
index 0fa04ab095c..ade1c28d51c 100644
--- a/mysys/lf_dynarray.c
+++ b/mysys/lf_dynarray.c
@@ -38,7 +38,7 @@
void lf_dynarray_init(LF_DYNARRAY *array, uint element_size)
{
bzero(array, sizeof(*array));
- array->size_of_element=element_size;
+ array->size_of_element= element_size;
my_atomic_rwlock_init(&array->lock);
}
@@ -49,7 +49,7 @@ static void recursive_free(void **alloc, int level)
if (level)
{
int i;
- for (i=0; i < LF_DYNARRAY_LEVEL_LENGTH; i++)
+ for (i= 0; i < LF_DYNARRAY_LEVEL_LENGTH; i++)
recursive_free(alloc[i], level-1);
my_free((void *)alloc, MYF(0));
}
@@ -60,13 +60,13 @@ static void recursive_free(void **alloc, int level)
void lf_dynarray_destroy(LF_DYNARRAY *array)
{
int i;
- for (i=0; i < LF_DYNARRAY_LEVELS; i++)
+ for (i= 0; i < LF_DYNARRAY_LEVELS; i++)
recursive_free(array->level[i], i);
my_atomic_rwlock_destroy(&array->lock);
bzero(array, sizeof(*array));
}
-static const int dynarray_idxes_in_level[LF_DYNARRAY_LEVELS]=
+static const int dynarray_idxes_in_prev_level[LF_DYNARRAY_LEVELS]=
{
0, /* +1 here to to avoid -1's below */
LF_DYNARRAY_LEVEL_LENGTH,
@@ -77,41 +77,32 @@ static const int dynarray_idxes_in_level[LF_DYNARRAY_LEVELS]=
void *_lf_dynarray_lvalue(LF_DYNARRAY *array, uint idx)
{
- void * ptr, * volatile * ptr_ptr=0;
+ void * ptr, * volatile * ptr_ptr= 0;
int i;
- for (i=3; i > 0; i--)
+ for (i= 3; idx < dynarray_idxes_in_prev_level[i]; i--) /* no-op */;
+ ptr_ptr= &array->level[i];
+ idx-= dynarray_idxes_in_prev_level[i];
+ for (; i > 0; i--)
{
- if (ptr_ptr || idx >= dynarray_idxes_in_level[i])
+ if (!(ptr= *ptr_ptr))
{
- if (!ptr_ptr)
- {
- ptr_ptr=&array->level[i];
- idx-= dynarray_idxes_in_level[i];
- }
- ptr=*ptr_ptr;
- if (!ptr)
- {
- void *alloc=my_malloc(LF_DYNARRAY_LEVEL_LENGTH * sizeof(void *),
- MYF(MY_WME|MY_ZEROFILL));
- if (!alloc)
- return(NULL);
- if (my_atomic_casptr(ptr_ptr, &ptr, alloc))
- ptr= alloc;
- else
- my_free(alloc, MYF(0));
- }
- ptr_ptr=((void **)ptr) + idx / dynarray_idxes_in_level[i];
- idx%= dynarray_idxes_in_level[i];
+ void *alloc= my_malloc(LF_DYNARRAY_LEVEL_LENGTH * sizeof(void *),
+ MYF(MY_WME|MY_ZEROFILL));
+ if (!alloc)
+ return(NULL);
+ if (my_atomic_casptr(ptr_ptr, &ptr, alloc))
+ ptr= alloc;
+ else
+ my_free(alloc, MYF(0));
}
+ ptr_ptr= ((void **)ptr) + idx / dynarray_idxes_in_prev_level[i];
+ idx%= dynarray_idxes_in_prev_level[i];
}
- if (!ptr_ptr)
- ptr_ptr=&array->level[0];
- ptr=*ptr_ptr;
- if (!ptr)
+ if (!(ptr= *ptr_ptr))
{
void *alloc, *data;
- alloc=my_malloc(LF_DYNARRAY_LEVEL_LENGTH * array->size_of_element +
+ alloc= my_malloc(LF_DYNARRAY_LEVEL_LENGTH * array->size_of_element +
max(array->size_of_element, sizeof(void *)),
MYF(MY_WME|MY_ZEROFILL));
if (!alloc)
@@ -123,7 +114,7 @@ void *_lf_dynarray_lvalue(LF_DYNARRAY *array, uint idx)
if (mod)
data+= array->size_of_element - mod;
}
- ((void **)data)[-1]=alloc; /* free() will need the original pointer */
+ ((void **)data)[-1]= alloc; /* free() will need the original pointer */
if (my_atomic_casptr(ptr_ptr, &ptr, data))
ptr= data;
else
@@ -134,29 +125,20 @@ void *_lf_dynarray_lvalue(LF_DYNARRAY *array, uint idx)
void *_lf_dynarray_value(LF_DYNARRAY *array, uint idx)
{
- void * ptr, * volatile * ptr_ptr=0;
+ void * ptr, * volatile * ptr_ptr= 0;
int i;
- for (i=3; i > 0; i--)
+ for (i= 3; idx < dynarray_idxes_in_prev_level[i]; i--) /* no-op */;
+ ptr_ptr= &array->level[i];
+ idx-= dynarray_idxes_in_prev_level[i];
+ for (; i > 0; i--)
{
- if (ptr_ptr || idx >= dynarray_idxes_in_level[i])
- {
- if (!ptr_ptr)
- {
- ptr_ptr=&array->level[i];
- idx-= dynarray_idxes_in_level[i];
- }
- ptr=*ptr_ptr;
- if (!ptr)
- return(NULL);
- ptr_ptr=((void **)ptr) + idx / dynarray_idxes_in_level[i];
- idx %= dynarray_idxes_in_level[i];
- }
+ if (!(ptr= *ptr_ptr))
+ return(NULL);
+ ptr_ptr= ((void **)ptr) + idx / dynarray_idxes_in_prev_level[i];
+ idx %= dynarray_idxes_in_prev_level[i];
}
- if (!ptr_ptr)
- ptr_ptr=&array->level[0];
- ptr=*ptr_ptr;
- if (!ptr)
+ if (!(ptr= *ptr_ptr))
return(NULL);
return ptr + array->size_of_element * idx;
}
@@ -169,8 +151,8 @@ static int recursive_iterate(LF_DYNARRAY *array, void *ptr, int level,
return 0;
if (!level)
return func(ptr, arg);
- for (i=0; i < LF_DYNARRAY_LEVEL_LENGTH; i++)
- if ((res=recursive_iterate(array, ((void **)ptr)[i], level-1, func, arg)))
+ for (i= 0; i < LF_DYNARRAY_LEVEL_LENGTH; i++)
+ if ((res= recursive_iterate(array, ((void **)ptr)[i], level-1, func, arg)))
return res;
return 0;
}
@@ -178,8 +160,8 @@ static int recursive_iterate(LF_DYNARRAY *array, void *ptr, int level,
int _lf_dynarray_iterate(LF_DYNARRAY *array, lf_dynarray_func func, void *arg)
{
int i, res;
- for (i=0; i < LF_DYNARRAY_LEVELS; i++)
- if ((res=recursive_iterate(array, array->level[i], i, func, arg)))
+ for (i= 0; i < LF_DYNARRAY_LEVELS; i++)
+ if ((res= recursive_iterate(array, array->level[i], i, func, arg)))
return res;
return 0;
}
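
The rewritten _lf_dynarray_lvalue() and _lf_dynarray_value() above first pick the level whose cumulative starting offset covers the requested index, subtract that offset, and then do one pointer-array hop per remaining level. The standalone sketch below walks through the same decomposition; LF_DYNARRAY_LEVEL_LENGTH == 256 and LF_DYNARRAY_LEVELS == 4 are assumptions taken from lf.h rather than from this diff, and all names here are illustrative, not the mysys API.

/*
  Standalone sketch of the level decomposition (assumed constants, see
  the note above).  prev_level[i] is the flat index at which level i
  starts, mirroring dynarray_idxes_in_prev_level[] in the patch.
*/
#include <stdio.h>

#define LEVEL_LENGTH 256              /* assumed LF_DYNARRAY_LEVEL_LENGTH */
#define LEVELS       4                /* assumed LF_DYNARRAY_LEVELS       */

static const unsigned long prev_level[LEVELS]=
{
  0,                                                        /* level 0 */
  LEVEL_LENGTH,                                             /* level 1 */
  LEVEL_LENGTH + LEVEL_LENGTH*LEVEL_LENGTH,                 /* level 2 */
  LEVEL_LENGTH + LEVEL_LENGTH*LEVEL_LENGTH +
  LEVEL_LENGTH*LEVEL_LENGTH*LEVEL_LENGTH                    /* level 3 */
};

static void decompose(unsigned long idx)
{
  int i;
  /* pick the deepest level whose starting offset is <= idx */
  for (i= LEVELS-1; idx < prev_level[i]; i--) /* no-op */;
  idx-= prev_level[i];
  printf("level %d:", i);
  /* one pointer-array hop per remaining level, as in the patched loop */
  for (; i > 0; i--)
  {
    printf(" child %lu", idx / prev_level[i]);
    idx%= prev_level[i];
  }
  printf(", slot %lu in the data block\n", idx);
}

int main(void)
{
  decompose(5);       /* level 0: direct data block   */
  decompose(300);     /* level 1: one indirection     */
  decompose(70000);   /* level 2: two indirections    */
  return 0;
}

In the real code every hop that reads back NULL is filled with a zero-filled my_malloc()'ed pointer block published through my_atomic_casptr(); the thread that loses the race frees its block and continues with the winner's, which is why the lookup needs no locks.
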
diff --git a/mysys/lf_hash.c b/mysys/lf_hash.c
index 736c3ea4887..a0425e89556 100644
--- a/mysys/lf_hash.c
+++ b/mysys/lf_hash.c
@@ -19,6 +19,7 @@
TODO
try to get rid of dummy nodes ?
+ for non-unique hash, count only _distinct_ values
*/
#include <my_global.h>
#include <my_sys.h>
@@ -51,7 +52,7 @@ typedef struct {
cursor is positioned in either case
pins[0..2] are used, they are NOT removed on return
*/
-static int lfind(intptr volatile *head, uint32 hashnr,
+static int lfind(LF_SLIST * volatile *head, uint32 hashnr,
const uchar *key, uint keylen, CURSOR *cursor, LF_PINS *pins)
{
uint32 cur_hashnr;
@@ -60,7 +61,7 @@ static int lfind(intptr volatile *head, uint32 hashnr,
intptr link;
retry:
- cursor->prev=head;
+ cursor->prev=(intptr *)head;
do {
cursor->curr=PTR(*cursor->prev);
_lf_pin(pins,1,cursor->curr);
@@ -112,7 +113,7 @@ retry:
/*
RETURN
0 - inserted
- not 0 - a pointer to a conflict
+ not 0 - a pointer to a conflict (not pinned and thus unusable)
NOTE
it uses pins[0..2], on return all pins are removed.
@@ -125,17 +126,17 @@ static LF_SLIST *linsert(LF_SLIST * volatile *head, LF_SLIST *node,
do
{
- if (lfind((intptr*)head, node->hashnr, node->key, node->keylen,
+ if (lfind(head, node->hashnr, node->key, node->keylen,
&cursor, pins) &&
(flags & LF_HASH_UNIQUE))
- res=0;
+ res=0; /* duplicate found */
else
{
node->link=(intptr)cursor.curr;
assert(node->link != (intptr)node);
assert(cursor.prev != &node->link);
if (my_atomic_casptr((void **)cursor.prev, (void **)&cursor.curr, node))
- res=1;
+ res=1; /* inserted ok */
}
} while (res == -1);
_lf_unpin(pins, 0);
@@ -159,7 +160,7 @@ static int ldelete(LF_SLIST * volatile *head, uint32 hashnr,
do
{
- if (!lfind((intptr *)head, hashnr, key, keylen, &cursor, pins))
+ if (!lfind(head, hashnr, key, keylen, &cursor, pins))
res= 1;
else
if (my_atomic_casptr((void **)&(cursor.curr->link),
@@ -169,7 +170,7 @@ static int ldelete(LF_SLIST * volatile *head, uint32 hashnr,
(void **)&cursor.curr, cursor.next))
_lf_alloc_free(pins, cursor.curr);
else
- lfind((intptr *)head, hashnr, key, keylen, &cursor, pins);
+ lfind(head, hashnr, key, keylen, &cursor, pins);
res= 0;
}
} while (res == -1);
@@ -191,7 +192,7 @@ static LF_SLIST *lsearch(LF_SLIST * volatile *head, uint32 hashnr,
const uchar *key, uint keylen, LF_PINS *pins)
{
CURSOR cursor;
- int res=lfind((intptr *)head, hashnr, key, keylen, &cursor, pins);
+ int res=lfind(head, hashnr, key, keylen, &cursor, pins);
if (res) _lf_pin(pins, 2, cursor.curr);
_lf_unpin(pins, 0);
_lf_unpin(pins, 1);
@@ -214,7 +215,7 @@ static inline uint calc_hash(LF_HASH *hash, const uchar *key, uint keylen)
return nr1 & INT_MAX32;
}
-#define MAX_LOAD 1
+#define MAX_LOAD 1.0
static void initialize_bucket(LF_HASH *, LF_SLIST * volatile*, uint, LF_PINS *);
void lf_hash_init(LF_HASH *hash, uint element_size, uint flags,
@@ -262,7 +263,7 @@ int lf_hash_insert(LF_HASH *hash, LF_PINS *pins, const void *data)
uint csize, bucket, hashnr;
LF_SLIST *node, * volatile *el;
- lf_lock_by_pins(pins);
+ lf_rwlock_by_pins(pins);
node=(LF_SLIST *)_lf_alloc_new(pins);
memcpy(node+1, data, hash->element_size);
node->key= hash_key(hash, (uchar *)(node+1), &node->keylen);
@@ -275,13 +276,13 @@ int lf_hash_insert(LF_HASH *hash, LF_PINS *pins, const void *data)
if (linsert(el, node, pins, hash->flags))
{
_lf_alloc_free(pins, node);
- lf_unlock_by_pins(pins);
+ lf_rwunlock_by_pins(pins);
return 1;
}
csize= hash->size;
if ((my_atomic_add32(&hash->count, 1)+1.0) / csize > MAX_LOAD)
my_atomic_cas32(&hash->size, &csize, csize*2);
- lf_unlock_by_pins(pins);
+ lf_rwunlock_by_pins(pins);
return 0;
}
@@ -298,17 +299,17 @@ int lf_hash_delete(LF_HASH *hash, LF_PINS *pins, const void *key, uint keylen)
uint bucket, hashnr=calc_hash(hash, (uchar *)key, keylen);
bucket= hashnr % hash->size;
- lf_lock_by_pins(pins);
+ lf_rwlock_by_pins(pins);
el=_lf_dynarray_lvalue(&hash->array, bucket);
if (*el == NULL)
initialize_bucket(hash, el, bucket, pins);
if (ldelete(el, my_reverse_bits(hashnr) | 1, (uchar *)key, keylen, pins))
{
- lf_unlock_by_pins(pins);
+ lf_rwunlock_by_pins(pins);
return 1;
}
my_atomic_add32(&hash->count, -1);
- lf_unlock_by_pins(pins);
+ lf_rwunlock_by_pins(pins);
return 0;
}
@@ -322,13 +323,13 @@ void *lf_hash_search(LF_HASH *hash, LF_PINS *pins, const void *key, uint keylen)
uint bucket, hashnr=calc_hash(hash, (uchar *)key, keylen);
bucket= hashnr % hash->size;
- lf_lock_by_pins(pins);
+ lf_rwlock_by_pins(pins);
el=_lf_dynarray_lvalue(&hash->array, bucket);
if (*el == NULL)
initialize_bucket(hash, el, bucket, pins);
found= lsearch(el, my_reverse_bits(hashnr) | 1, (uchar *)key, keylen, pins);
- lf_unlock_by_pins(pins);
- return found+1;
+ lf_rwunlock_by_pins(pins);
+ return found ? found+1 : 0;
}
static char *dummy_key="";
@@ -347,7 +348,7 @@ static void initialize_bucket(LF_HASH *hash, LF_SLIST * volatile *node,
dummy->keylen=0;
if ((cur= linsert(el, dummy, pins, 0)))
{
- _lf_alloc_free(pins, dummy);
+ my_free((void *)dummy, MYF(0));
dummy= cur;
}
my_atomic_casptr((void **)node, (void **)&tmp, dummy);
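
lf_hash_insert() above grows the table opportunistically: my_atomic_add32() bumps the element count, and when the load factor climbs past MAX_LOAD a single compare-and-swap tries to double hash->size, silently backing off if another thread already grew it. Below is a minimal sketch of just that step, written with C11 atomics so it compiles on its own; the toy_hash type and note_insert() are illustrative names, not part of mysys.

/*
  Sketch of the grow-on-load-factor step from lf_hash_insert().
  The policy (MAX_LOAD, doubling, advisory CAS) mirrors the patch above.
*/
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_LOAD 1.0

struct toy_hash
{
  atomic_int_least32_t count;          /* number of elements     */
  atomic_int_least32_t size;           /* number of hash buckets */
};

static void note_insert(struct toy_hash *hash)
{
  int_least32_t csize= atomic_load(&hash->size);
  /* +1.0 because fetch_add returns the value before the increment */
  if ((atomic_fetch_add(&hash->count, 1) + 1.0) / csize > MAX_LOAD)
  {
    /*
      Try to double the bucket count.  If another thread resized first
      the CAS simply fails; growing is advisory and the hash stays
      correct either way.
    */
    atomic_compare_exchange_strong(&hash->size, &csize, csize * 2);
  }
}

int main(void)
{
  int i;
  struct toy_hash h;
  atomic_init(&h.count, 0);
  atomic_init(&h.size, 8);
  for (i= 0; i < 20; i++)
    note_insert(&h);
  printf("count=%d size=%d\n", (int) atomic_load(&h.count),
         (int) atomic_load(&h.size));
  return 0;
}
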
diff --git a/mysys/my_atomic.c b/mysys/my_atomic.c
index 2908a44961a..fbeb3d63bef 100644
--- a/mysys/my_atomic.c
+++ b/mysys/my_atomic.c
@@ -35,7 +35,7 @@
*/
int my_atomic_initialize()
{
- DBUG_ASSERT(sizeof(intptr) == sizeof(void *));
+ char assert_the_size[sizeof(intptr) == sizeof(void *) ? 1 : -1];
/* currently the only thing worth checking is SMP/UP issue */
#ifdef MY_ATOMIC_MODE_DUMMY
return my_getncpus() == 1 ? MY_ATOMIC_OK : MY_ATOMIC_NOT_1CPU;
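
The my_atomic.c hunk replaces a runtime DBUG_ASSERT with a compile-time check: an array whose size becomes -1 when the condition is false makes the build fail outright, so the pointer-size assumption can never be compiled out along with debug asserts. A self-contained sketch of the same trick, with intptr_t from <stdint.h> standing in for mysys' intptr:

/*
  Negative-array-size compile-time assert.  If the condition is false
  the array size is -1 and the translation unit does not compile.
*/
#include <stdint.h>
#include <stdio.h>

#define compile_time_assert(expr)              \
  do {                                         \
    char assert_the_size[(expr) ? 1 : -1];     \
    (void) assert_the_size;                    \
  } while (0)

int main(void)
{
  /* refuses to compile if a pointer does not fit into intptr_t */
  compile_time_assert(sizeof(intptr_t) == sizeof(void *));
  puts("pointer fits into intptr_t");
  return 0;
}
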