Diffstat (limited to 'mysys')
-rw-r--r--  mysys/lf_alloc-pin.c     29
-rw-r--r--  mysys/lf_dynarray.c      35
-rw-r--r--  mysys/lf_hash.c         119
-rw-r--r--  mysys/my_getsystime.c     4
4 files changed, 98 insertions(+), 89 deletions(-)
diff --git a/mysys/lf_alloc-pin.c b/mysys/lf_alloc-pin.c
index b96fe42311b..43055766c3e 100644
--- a/mysys/lf_alloc-pin.c
+++ b/mysys/lf_alloc-pin.c
@@ -91,7 +91,7 @@ static void _lf_pinbox_real_free(LF_PINS *pins);
See the latter for details.
*/
void lf_pinbox_init(LF_PINBOX *pinbox, uint free_ptr_offset,
- lf_pinbox_free_func *free_func,void *free_func_arg)
+ lf_pinbox_free_func *free_func, void *free_func_arg)
{
DBUG_ASSERT(sizeof(LF_PINS) == 128);
DBUG_ASSERT(free_ptr_offset % sizeof(void *) == 0);
@@ -306,7 +306,7 @@ static void _lf_pinbox_real_free(LF_PINS *pins)
{
if (addr) /* use binary search */
{
- void **a,**b,**c;
+ void **a, **b, **c;
for (a= addr, b= addr+npins-1, c= a+(b-a)/2; b-a>1; c= a+(b-a)/2)
if (cur == *c)
a= b= c;
@@ -337,13 +337,13 @@ found:
callback for _lf_pinbox_real_free to free an unpinned object -
add it back to the allocator stack
*/
-static void alloc_free(void *node, LF_ALLOCATOR *allocator)
+static void alloc_free(struct st_lf_alloc_node *node, LF_ALLOCATOR *allocator)
{
- void *tmp;
+ struct st_lf_alloc_node *tmp;
tmp= allocator->top;
do
{
- (*(void **)node)= tmp;
+ node->next= tmp;
} while (!my_atomic_casptr((void **)&allocator->top, (void **)&tmp, node) &&
LF_BACKOFF);
}
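
The alloc_free() change swaps the untyped *(void **)node write for a typed node->next, but the technique is unchanged: a lock-free LIFO push whose CAS re-reads the stack top on every failure. A minimal standalone sketch of that idiom (illustrative names, C11 atomics instead of my_atomic_casptr; not part of this patch):

#include <stdatomic.h>

struct node { struct node *next; };

/* Push a node onto a lock-free free list.  The CAS succeeds only if
   the top still equals the value copied into node->next, so a
   concurrent push or pop simply forces another iteration. */
static void stack_push(struct node *_Atomic *top, struct node *n)
{
  struct node *old= atomic_load(top);
  do
  {
    n->next= old;                  /* link to the current head */
  } while (!atomic_compare_exchange_weak(top, &old, n));
}
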
@@ -379,12 +379,12 @@ void lf_alloc_init(LF_ALLOCATOR *allocator, uint size, uint free_ptr_offset)
*/
void lf_alloc_destroy(LF_ALLOCATOR *allocator)
{
- void *el= allocator->top;
- while (el)
+ struct st_lf_alloc_node *node= allocator->top;
+ while (node)
{
- void *tmp= *(void **)el;
- my_free(el, MYF(0));
- el= tmp;
+ struct st_lf_alloc_node *tmp= node->next;
+ my_free((void *)node, MYF(0));
+ node= tmp;
}
lf_pinbox_destroy(&allocator->pinbox);
allocator->top= 0;
@@ -400,7 +400,7 @@ void lf_alloc_destroy(LF_ALLOCATOR *allocator)
void *_lf_alloc_new(LF_PINS *pins)
{
LF_ALLOCATOR *allocator= (LF_ALLOCATOR *)(pins->pinbox->free_func_arg);
- void *node;
+ struct st_lf_alloc_node *node;
for (;;)
{
do
@@ -410,7 +410,8 @@ void *_lf_alloc_new(LF_PINS *pins)
} while (node != allocator->top && LF_BACKOFF);
if (!node)
{
- if (!(node= my_malloc(allocator->element_size, MYF(MY_WME|MY_ZEROFILL))))
+ if (!(node= (void *)my_malloc(allocator->element_size,
+ MYF(MY_WME|MY_ZEROFILL))))
break;
#ifdef MY_LF_EXTRA_DEBUG
my_atomic_add32(&allocator->mallocs, 1);
@@ -434,8 +435,8 @@ void *_lf_alloc_new(LF_PINS *pins)
uint lf_alloc_in_pool(LF_ALLOCATOR *allocator)
{
uint i;
- void *node;
- for (node= allocator->top, i= 0; node; node= *(void **)node, i++)
+ struct st_lf_alloc_node *node;
+ for (node= allocator->top, i= 0; node; node= node->next, i++)
/* no op */;
return i;
}
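
_lf_alloc_new() and lf_alloc_in_pool() above are the matching pop and walk over the same typed free list. Reduced to the CAS loop shape (again C11 atomics and invented names; the real code additionally pins the head before dereferencing it, which is what makes the node->next read safe):

#include <stdatomic.h>
#include <stdlib.h>

struct node { struct node *next; };

/* Pop one node, or allocate a fresh zero-filled one when the list is
   empty.  Without pins/hazard pointers this sketch is only safe if
   popped nodes are never returned to the OS -- the guarantee the
   allocator above provides. */
static struct node *pop_or_alloc(struct node *_Atomic *top, size_t size)
{
  struct node *n= atomic_load(top);
  while (n && !atomic_compare_exchange_weak(top, &n, n->next))
    ;                              /* 'n' is refreshed on CAS failure */
  return n ? n : calloc(1, size);
}
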
diff --git a/mysys/lf_dynarray.c b/mysys/lf_dynarray.c
index d63af91813e..c6dd654bf03 100644
--- a/mysys/lf_dynarray.c
+++ b/mysys/lf_dynarray.c
@@ -19,9 +19,9 @@
(so no pointer into the array may ever become invalid).
Memory is allocated in non-contiguous chunks.
- This data structure is not space efficient for sparce arrays.
+ This data structure is not space efficient for sparse arrays.
- The number of elements is limited to 2^16
+ The number of elements is limited to 4311810304
Every element is aligned to sizeof(element) boundary
(to avoid false sharing if element is big enough).
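
The new element limit is simply the sum of the four level capacities. Assuming the usual LF_DYNARRAY_LEVEL_LENGTH of 256 over LF_DYNARRAY_LEVELS = 4 (values defined in lf.h, not shown in this diff), the number quoted in the comment checks out:

#include <assert.h>
#define LEVEL_LENGTH 256ULL    /* assumed LF_DYNARRAY_LEVEL_LENGTH */

/* 256 + 65536 + 16777216 + 4294967296 == 4311810304 */
static_assert(LEVEL_LENGTH +
              LEVEL_LENGTH * LEVEL_LENGTH +
              LEVEL_LENGTH * LEVEL_LENGTH * LEVEL_LENGTH +
              LEVEL_LENGTH * LEVEL_LENGTH * LEVEL_LENGTH * LEVEL_LENGTH
              == 4311810304ULL, "sum of the four level capacities");
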
@@ -49,7 +49,8 @@ void lf_dynarray_init(LF_DYNARRAY *array, uint element_size)
static void recursive_free(void **alloc, int level)
{
- if (!alloc) return;
+ if (!alloc)
+ return;
if (level)
{
@@ -68,10 +69,9 @@ void lf_dynarray_destroy(LF_DYNARRAY *array)
for (i= 0; i < LF_DYNARRAY_LEVELS; i++)
recursive_free(array->level[i], i);
my_atomic_rwlock_destroy(&array->lock);
- bzero(array, sizeof(*array));
}
-static const long dynarray_idxes_in_prev_level[LF_DYNARRAY_LEVELS]=
+static const ulong dynarray_idxes_in_prev_levels[LF_DYNARRAY_LEVELS]=
{
0, /* +1 here to to avoid -1's below */
LF_DYNARRAY_LEVEL_LENGTH,
@@ -82,6 +82,15 @@ static const long dynarray_idxes_in_prev_level[LF_DYNARRAY_LEVELS]=
LF_DYNARRAY_LEVEL_LENGTH + LF_DYNARRAY_LEVEL_LENGTH
};
+static const ulong dynarray_idxes_in_prev_level[LF_DYNARRAY_LEVELS]=
+{
+ 0, /* +1 here to to avoid -1's below */
+ LF_DYNARRAY_LEVEL_LENGTH,
+ LF_DYNARRAY_LEVEL_LENGTH * LF_DYNARRAY_LEVEL_LENGTH,
+ LF_DYNARRAY_LEVEL_LENGTH * LF_DYNARRAY_LEVEL_LENGTH *
+ LF_DYNARRAY_LEVEL_LENGTH,
+};
+
/*
Returns a valid lvalue pointer to the element number 'idx'.
Allocates memory if necessary.
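
The renamed dynarray_idxes_in_prev_levels[] holds cumulative start offsets (0, 256, 256 + 256^2, ...), so both lookup functions scan backwards for the first level whose start offset does not exceed idx and then make idx level-relative. A self-contained sketch of that lookup step, under the same assumed LEVEL_LENGTH = 256:

#define LEVELS 4
#define LEVEL_LENGTH 256UL          /* assumed, see lf.h */

static const unsigned long start_of_level[LEVELS]=
{
  0,
  LEVEL_LENGTH,
  LEVEL_LENGTH + LEVEL_LENGTH * LEVEL_LENGTH,
  LEVEL_LENGTH + LEVEL_LENGTH * LEVEL_LENGTH +
  LEVEL_LENGTH * LEVEL_LENGTH * LEVEL_LENGTH
};

/* Return the level that holds global index *idx and rewrite *idx to be
   relative to that level's start (mirrors the loop in
   _lf_dynarray_lvalue(); e.g. idx 300 lands in level 1 at offset 44). */
static int find_level(unsigned long *idx)
{
  int i;
  for (i= LEVELS - 1; *idx < start_of_level[i]; i--)
    /* no-op */;
  *idx-= start_of_level[i];
  return i;
}
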
@@ -91,16 +100,17 @@ void *_lf_dynarray_lvalue(LF_DYNARRAY *array, uint idx)
void * ptr, * volatile * ptr_ptr= 0;
int i;
- for (i= 3; idx < dynarray_idxes_in_prev_level[i]; i--) /* no-op */;
+ for (i= LF_DYNARRAY_LEVELS-1; idx < dynarray_idxes_in_prev_levels[i]; i--)
+ /* no-op */;
ptr_ptr= &array->level[i];
- idx-= dynarray_idxes_in_prev_level[i];
+ idx-= dynarray_idxes_in_prev_levels[i];
for (; i > 0; i--)
{
if (!(ptr= *ptr_ptr))
{
void *alloc= my_malloc(LF_DYNARRAY_LEVEL_LENGTH * sizeof(void *),
- MYF(MY_WME|MY_ZEROFILL));
- if (!alloc)
+ MYF(MY_WME|MY_ZEROFILL));
+ if (unlikely(!alloc))
return(NULL);
if (my_atomic_casptr(ptr_ptr, &ptr, alloc))
ptr= alloc;
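
When a level pointer is still NULL the code allocates a zero-filled block and tries to publish it with my_atomic_casptr(); if the CAS loses to a concurrent caller, the winner's block is kept. The same publish-or-discard idiom in isolation (C11 atomics, invented names; in this sketch the loser simply frees its block):

#include <stdatomic.h>
#include <stdlib.h>

/* Install a lazily allocated block into *slot exactly once. */
static void *get_or_create(void *_Atomic *slot, size_t size)
{
  void *p= atomic_load(slot);
  if (!p)
  {
    void *fresh= calloc(1, size);
    if (!fresh)
      return NULL;
    if (atomic_compare_exchange_strong(slot, &p, fresh))
      p= fresh;                    /* we published our block */
    else
      free(fresh);                 /* somebody else won the race */
  }
  return p;
}
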
@@ -116,7 +126,7 @@ void *_lf_dynarray_lvalue(LF_DYNARRAY *array, uint idx)
alloc= my_malloc(LF_DYNARRAY_LEVEL_LENGTH * array->size_of_element +
max(array->size_of_element, sizeof(void *)),
MYF(MY_WME|MY_ZEROFILL));
- if (!alloc)
+ if (unlikely(!alloc))
return(NULL);
/* reserve the space for free() address */
data= alloc + sizeof(void *);
@@ -143,9 +153,10 @@ void *_lf_dynarray_value(LF_DYNARRAY *array, uint idx)
void * ptr, * volatile * ptr_ptr= 0;
int i;
- for (i= 3; idx < dynarray_idxes_in_prev_level[i]; i--) /* no-op */;
+ for (i= LF_DYNARRAY_LEVELS-1; idx < dynarray_idxes_in_prev_levels[i]; i--)
+ /* no-op */;
ptr_ptr= &array->level[i];
- idx-= dynarray_idxes_in_prev_level[i];
+ idx-= dynarray_idxes_in_prev_levels[i];
for (; i > 0; i--)
{
if (!(ptr= *ptr_ptr))
diff --git a/mysys/lf_hash.c b/mysys/lf_hash.c
index 66ad672f345..ff0eb8326d5 100644
--- a/mysys/lf_hash.c
+++ b/mysys/lf_hash.c
@@ -23,6 +23,7 @@
(but how to do it in lf_hash_delete ?)
*/
#include <my_global.h>
+#include <m_string.h>
#include <my_sys.h>
#include <my_bit.h>
#include <lf.h>
@@ -33,7 +34,7 @@ LF_REQUIRE_PINS(3);
typedef struct {
intptr volatile link; /* a pointer to the next element in a list and a flag */
uint32 hashnr; /* reversed hash number, for sorting */
- const uchar *key;
+ const byte *key;
uint keylen;
} LF_SLIST;
@@ -67,31 +68,31 @@ typedef struct {
pins[0..2] are used, they are NOT removed on return
*/
static int lfind(LF_SLIST * volatile *head, CHARSET_INFO *cs, uint32 hashnr,
- const uchar *key, uint keylen, CURSOR *cursor, LF_PINS *pins)
+ const byte *key, uint keylen, CURSOR *cursor, LF_PINS *pins)
{
uint32 cur_hashnr;
- const uchar *cur_key;
+ const byte *cur_key;
uint cur_keylen;
intptr link;
retry:
- cursor->prev=(intptr *)head;
+ cursor->prev= (intptr *)head;
do {
- cursor->curr=PTR(*cursor->prev);
- _lf_pin(pins,1,cursor->curr);
+ cursor->curr= PTR(*cursor->prev);
+ _lf_pin(pins, 1, cursor->curr);
} while(*cursor->prev != (intptr)cursor->curr && LF_BACKOFF);
for (;;)
{
if (!cursor->curr)
return 0;
do { // XXX or goto retry ?
- link=cursor->curr->link;
- cursor->next=PTR(link);
+ link= cursor->curr->link;
+ cursor->next= PTR(link);
_lf_pin(pins, 0, cursor->next);
} while(link != cursor->curr->link && LF_BACKOFF);
- cur_hashnr=cursor->curr->hashnr;
- cur_key=cursor->curr->key;
- cur_keylen=cursor->curr->keylen;
+ cur_hashnr= cursor->curr->hashnr;
+ cur_key= cursor->curr->key;
+ cur_keylen= cursor->curr->keylen;
if (*cursor->prev != (intptr)cursor->curr)
{
LF_BACKOFF;
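
lfind() above is built on a pin-then-validate idiom: load a pointer, publish it as a pin, then re-read the source and retry if it changed in between, so the pinned node cannot already have been reused. Stripped of the list bookkeeping it looks roughly like this (reduced illustration, using the _lf_pin() call seen in the diff):

#include <my_global.h>
#include <my_sys.h>
#include <lf.h>

typedef struct my_node { struct my_node * volatile next; } my_node;

/* Pin *src into pin slot 'slot' and return it; only trust the pointer
   once it is pinned and *src still refers to it (compare the
   do { ... } while (... != ... && LF_BACKOFF) loops in lfind()). */
static my_node *pin_node(LF_PINS *pins, int slot, my_node * volatile *src)
{
  my_node *p;
  do
  {
    p= *src;
    _lf_pin(pins, slot, p);
  } while (p != *src);
  return p;
}
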
@@ -101,12 +102,12 @@ retry:
{
if (cur_hashnr >= hashnr)
{
- int r=1;
+ int r= 1;
if (cur_hashnr > hashnr ||
- (r=my_strnncoll(cs, cur_key, cur_keylen, key, keylen)) >= 0)
+ (r= my_strnncoll(cs, cur_key, cur_keylen, key, keylen)) >= 0)
return !r;
}
- cursor->prev=&(cursor->curr->link);
+ cursor->prev= &(cursor->curr->link);
_lf_pin(pins, 2, cursor->curr);
}
else
@@ -120,7 +121,7 @@ retry:
goto retry;
}
}
- cursor->curr=cursor->next;
+ cursor->curr= cursor->next;
_lf_pin(pins, 1, cursor->curr);
}
}
@@ -141,21 +142,21 @@ static LF_SLIST *linsert(LF_SLIST * volatile *head, CHARSET_INFO *cs,
LF_SLIST *node, LF_PINS *pins, uint flags)
{
CURSOR cursor;
- int res=-1;
+ int res= -1;
do
{
if (lfind(head, cs, node->hashnr, node->key, node->keylen,
&cursor, pins) &&
(flags & LF_HASH_UNIQUE))
- res=0; /* duplicate found */
+ res= 0; /* duplicate found */
else
{
- node->link=(intptr)cursor.curr;
+ node->link= (intptr)cursor.curr;
assert(node->link != (intptr)node);
assert(cursor.prev != &node->link);
if (my_atomic_casptr((void **)cursor.prev, (void **)&cursor.curr, node))
- res=1; /* inserted ok */
+ res= 1; /* inserted ok */
}
} while (res == -1);
_lf_unpin(pins, 0);
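
linsert() keeps retrying until either a duplicate is found (when LF_HASH_UNIQUE is set) or one CAS splices the new node in front of cursor.curr: the CAS target is the predecessor's link and its expected value is the successor, so the splice fails cleanly if anything moved. The splice step on its own (generic C11 sketch, not the MySQL API):

#include <stdatomic.h>

struct n { struct n *_Atomic next; int key; };

/* Splice 'node' between *prev and curr; returns 0 if another thread
   changed *prev first, in which case the caller re-runs the search
   (exactly the res == -1 retry in linsert()). */
static int splice(struct n *_Atomic *prev, struct n *curr, struct n *node)
{
  node->next= curr;              /* new node points at the old successor */
  return atomic_compare_exchange_strong(prev, &curr, node);
}
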
@@ -177,10 +178,10 @@ static LF_SLIST *linsert(LF_SLIST * volatile *head, CHARSET_INFO *cs,
it uses pins[0..2], on return all pins are removed.
*/
static int ldelete(LF_SLIST * volatile *head, CHARSET_INFO *cs, uint32 hashnr,
- const uchar *key, uint keylen, LF_PINS *pins)
+ const byte *key, uint keylen, LF_PINS *pins)
{
CURSOR cursor;
- int res=-1;
+ int res= -1;
do
{
@@ -218,30 +219,30 @@ static int ldelete(LF_SLIST * volatile *head, CHARSET_INFO *cs, uint32 hashnr,
all other pins are removed.
*/
static LF_SLIST *lsearch(LF_SLIST * volatile *head, CHARSET_INFO *cs,
- uint32 hashnr, const uchar *key, uint keylen,
+ uint32 hashnr, const byte *key, uint keylen,
LF_PINS *pins)
{
CURSOR cursor;
- int res=lfind(head, cs, hashnr, key, keylen, &cursor, pins);
+ int res= lfind(head, cs, hashnr, key, keylen, &cursor, pins);
if (res) _lf_pin(pins, 2, cursor.curr);
_lf_unpin(pins, 0);
_lf_unpin(pins, 1);
return res ? cursor.curr : 0;
}
-static inline const uchar* hash_key(const LF_HASH *hash,
- const uchar *record, uint *length)
+static inline const byte* hash_key(const LF_HASH *hash,
+ const byte *record, uint *length)
{
if (hash->get_key)
- return (*hash->get_key)(record,length,0);
- *length=hash->key_length;
+ return (*hash->get_key)(record, length, 0);
+ *length= hash->key_length;
return record + hash->key_offset;
}
-static inline uint calc_hash(LF_HASH *hash, const uchar *key, uint keylen)
+static inline uint calc_hash(LF_HASH *hash, const byte *key, uint keylen)
{
- ulong nr1=1, nr2=4;
- hash->charset->coll->hash_sort(hash->charset,key,keylen,&nr1,&nr2);
+ ulong nr1= 1, nr2= 4;
+ hash->charset->coll->hash_sort(hash->charset, key, keylen, &nr1, &nr2);
return nr1 & INT_MAX32;
}
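
hash_key() either calls the user's get_key callback or returns a fixed slice of the record, and calc_hash() then runs the collation's hash_sort() over those bytes and masks the result with INT_MAX32. A hypothetical record layout for the fixed-offset case (names invented; this is what hash_key() reduces to when hash->get_key is NULL):

#include <stddef.h>

struct account            /* illustrative element type */
{
  unsigned int id;
  char         name[16];  /* the key */
  double       balance;
};

/* With key_offset = offsetof(struct account, name) and key_length = 16,
   hash_key() returns record + key_offset and sets *length = 16. */
static const char *account_key(const struct account *rec, unsigned int *length)
{
  *length= sizeof(rec->name);                                /* key_length */
  return (const char *)rec + offsetof(struct account, name); /* key_offset */
}
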
@@ -258,28 +259,28 @@ void lf_hash_init(LF_HASH *hash, uint element_size, uint flags,
lf_alloc_init(&hash->alloc, sizeof(LF_SLIST)+element_size,
offsetof(LF_SLIST, key));
lf_dynarray_init(&hash->array, sizeof(LF_SLIST **));
- hash->size=1;
- hash->count=0;
- hash->element_size=element_size;
- hash->flags=flags;
- hash->charset=charset ? charset : &my_charset_bin;
- hash->key_offset=key_offset;
- hash->key_length=key_length;
- hash->get_key=get_key;
+ hash->size= 1;
+ hash->count= 0;
+ hash->element_size= element_size;
+ hash->flags= flags;
+ hash->charset= charset ? charset : &my_charset_bin;
+ hash->key_offset= key_offset;
+ hash->key_length= key_length;
+ hash->get_key= get_key;
DBUG_ASSERT(get_key ? !key_offset && !key_length : key_length);
}
void lf_hash_destroy(LF_HASH *hash)
{
- LF_SLIST *el=*(LF_SLIST **)_lf_dynarray_lvalue(&hash->array, 0);
+ LF_SLIST *el= *(LF_SLIST **)_lf_dynarray_lvalue(&hash->array, 0);
while (el)
{
- intptr next=el->link;
+ intptr next= el->link;
if (el->hashnr & 1)
lf_alloc_real_free(&hash->alloc, el);
else
my_free((void *)el, MYF(0));
- el=(LF_SLIST *)next;
+ el= (LF_SLIST *)next;
}
lf_alloc_destroy(&hash->alloc);
lf_dynarray_destroy(&hash->array);
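
For orientation, a hedged usage sketch of the initializer shown above: the first three parameters appear in the hunk header, and the trailing ones are assumed to follow the order of the assignments (key_offset, key_length, get_key, charset) -- lf.h has the authoritative prototype. A NULL charset falls back to my_charset_bin, per the code above.

#include <stddef.h>
#include <my_global.h>
#include <lf.h>

struct entry { uint id; char payload[32]; };  /* made-up element type */

static void example_init(LF_HASH *hash)
{
  /* hash 'struct entry' records by the leading uint id, unique keys */
  lf_hash_init(hash, sizeof(struct entry), LF_HASH_UNIQUE,
               offsetof(struct entry, id), sizeof(uint),
               NULL,            /* no get_key: key at a fixed offset */
               NULL);           /* NULL charset => my_charset_bin */
}
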
@@ -299,19 +300,19 @@ void lf_hash_destroy(LF_HASH *hash)
*/
int lf_hash_insert(LF_HASH *hash, LF_PINS *pins, const void *data)
{
- uint csize, bucket, hashnr;
+ int csize, bucket, hashnr;
LF_SLIST *node, * volatile *el;
lf_rwlock_by_pins(pins);
- node=(LF_SLIST *)_lf_alloc_new(pins);
+ node= (LF_SLIST *)_lf_alloc_new(pins);
memcpy(node+1, data, hash->element_size);
- node->key= hash_key(hash, (uchar *)(node+1), &node->keylen);
+ node->key= hash_key(hash, (byte *)(node+1), &node->keylen);
hashnr= calc_hash(hash, node->key, node->keylen);
bucket= hashnr % hash->size;
- el=_lf_dynarray_lvalue(&hash->array, bucket);
+ el= _lf_dynarray_lvalue(&hash->array, bucket);
if (*el == NULL)
initialize_bucket(hash, el, bucket, pins);
- node->hashnr=my_reverse_bits(hashnr) | 1;
+ node->hashnr= my_reverse_bits(hashnr) | 1;
if (linsert(el, hash->charset, node, pins, hash->flags))
{
_lf_alloc_free(pins, node);
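
The hashnr/bucket arithmetic in lf_hash_insert() is the split-ordered-list trick: buckets are addressed by hashnr % size, while nodes are sorted by the bit-reversed hash, with data nodes always setting the low bit and dummy bucket heads leaving it clear. A worked example with 8-bit reversal for brevity (my_reverse_bits() reverses 32 bits):

/*
   hashnr = 6 = 00000110b, hash->size = 4   =>   bucket = 6 % 4 = 2

   dummy node for bucket 2 : reverse(2)     = 01000000b =  64  (LSB 0)
   dummy node for bucket 6 : reverse(6)     = 01100000b =  96  (LSB 0)
   data  node, hashnr 6    : reverse(6) | 1 = 01100001b =  97  (LSB 1)

   In the reversed ordering 64 < 96 < 97, so when the table grows from
   4 to 8 buckets the new dummy 96 slides in ahead of the existing data
   node 97 without moving it -- which is why data nodes set the low bit
   and dummies never do.
*/
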
@@ -335,15 +336,15 @@ int lf_hash_insert(LF_HASH *hash, LF_PINS *pins, const void *data)
int lf_hash_delete(LF_HASH *hash, LF_PINS *pins, const void *key, uint keylen)
{
LF_SLIST * volatile *el;
- uint bucket, hashnr=calc_hash(hash, (uchar *)key, keylen);
+ uint bucket, hashnr= calc_hash(hash, (byte *)key, keylen);
bucket= hashnr % hash->size;
lf_rwlock_by_pins(pins);
- el=_lf_dynarray_lvalue(&hash->array, bucket);
+ el= _lf_dynarray_lvalue(&hash->array, bucket);
if (*el == NULL)
initialize_bucket(hash, el, bucket, pins);
if (ldelete(el, hash->charset, my_reverse_bits(hashnr) | 1,
- (uchar *)key, keylen, pins))
+ (byte *)key, keylen, pins))
{
lf_rwunlock_by_pins(pins);
return 1;
@@ -360,33 +361,33 @@ int lf_hash_delete(LF_HASH *hash, LF_PINS *pins, const void *key, uint keylen)
void *lf_hash_search(LF_HASH *hash, LF_PINS *pins, const void *key, uint keylen)
{
LF_SLIST * volatile *el, *found;
- uint bucket, hashnr=calc_hash(hash, (uchar *)key, keylen);
+ uint bucket, hashnr= calc_hash(hash, (byte *)key, keylen);
bucket= hashnr % hash->size;
lf_rwlock_by_pins(pins);
- el=_lf_dynarray_lvalue(&hash->array, bucket);
+ el= _lf_dynarray_lvalue(&hash->array, bucket);
if (*el == NULL)
initialize_bucket(hash, el, bucket, pins);
found= lsearch(el, hash->charset, my_reverse_bits(hashnr) | 1,
- (uchar *)key, keylen, pins);
+ (byte *)key, keylen, pins);
lf_rwunlock_by_pins(pins);
return found ? found+1 : 0;
}
-static char *dummy_key="";
+static char *dummy_key= "";
static void initialize_bucket(LF_HASH *hash, LF_SLIST * volatile *node,
uint bucket, LF_PINS *pins)
{
uint parent= my_clear_highest_bit(bucket);
- LF_SLIST *dummy=(LF_SLIST *)my_malloc(sizeof(LF_SLIST), MYF(MY_WME));
- LF_SLIST **tmp=0, *cur;
- LF_SLIST * volatile *el=_lf_dynarray_lvalue(&hash->array, parent);
+ LF_SLIST *dummy= (LF_SLIST *)my_malloc(sizeof(LF_SLIST), MYF(MY_WME));
+ LF_SLIST **tmp= 0, *cur;
+ LF_SLIST * volatile *el= _lf_dynarray_lvalue(&hash->array, parent);
if (*el == NULL && bucket)
initialize_bucket(hash, el, parent, pins);
- dummy->hashnr=my_reverse_bits(bucket);
- dummy->key=dummy_key;
- dummy->keylen=0;
+ dummy->hashnr= my_reverse_bits(bucket);
+ dummy->key= dummy_key;
+ dummy->keylen= 0;
if ((cur= linsert(el, hash->charset, dummy, pins, 0)))
{
my_free((void *)dummy, MYF(0));
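
initialize_bucket() creates the dummy head nodes lazily and bottom-up: each bucket's parent is the bucket number with its highest set bit cleared (assuming that is what my_clear_highest_bit() does), so the recursion always terminates at bucket 0. For example:

/*
   bucket 6 = 110b  ->  parent 2 = 010b  ->  parent 0 = 000b

   Initializing bucket 6 therefore first guarantees that buckets 2 and 0
   exist, and the dummy for 6 (hashnr = my_reverse_bits(6), low bit 0)
   is inserted into the list starting from bucket 2's dummy.
*/
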
diff --git a/mysys/my_getsystime.c b/mysys/my_getsystime.c
index 91c977f0b5a..d1ed4f2ec92 100644
--- a/mysys/my_getsystime.c
+++ b/mysys/my_getsystime.c
@@ -35,10 +35,6 @@ ulonglong my_getsystime()
LARGE_INTEGER t_cnt;
if (!offset)
{
- /* strictly speaking there should be a mutex to protect
- initialization section. But my_getsystime() is called from
- UUID() code, and UUID() calls are serialized with a mutex anyway
- */
LARGE_INTEGER li;
FILETIME ft;
GetSystemTimeAsFileTime(&ft);
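
The deleted comment explained why the unsynchronized, one-shot initialization of 'offset' was considered acceptable on this call path. The surrounding Windows branch pairs a wall-clock offset taken from GetSystemTimeAsFileTime() with QueryPerformanceCounter() readings; a generic sketch of that pattern (not the exact MySQL code, and it keeps the same benign lazy-init race):

#include <windows.h>

/* Return a timestamp in 100 ns units, anchored once to the wall clock
   (FILETIME epoch) and advanced by the performance counter afterwards. */
static ULONGLONG sample_100ns(void)
{
  static LONGLONG freq, offset;     /* lazily initialized, unsynchronized */
  LARGE_INTEGER t;

  if (!offset)
  {
    LARGE_INTEGER f, li;
    FILETIME ft;
    GetSystemTimeAsFileTime(&ft);   /* wall clock in 100 ns units */
    li.LowPart=  ft.dwLowDateTime;
    li.HighPart= ft.dwHighDateTime;
    QueryPerformanceFrequency(&f);
    QueryPerformanceCounter(&t);
    freq=   f.QuadPart;
    /* remember where the counter stood at this wall-clock instant */
    offset= li.QuadPart - (t.QuadPart / freq) * 10000000
                        - (t.QuadPart % freq) * 10000000 / freq;
  }
  QueryPerformanceCounter(&t);
  return offset + (t.QuadPart / freq) * 10000000
                + (t.QuadPart % freq) * 10000000 / freq;
}
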