Diffstat (limited to 'unittest/mysys')
-rw-r--r--  unittest/mysys/Makefile.am   |   8
-rw-r--r--  unittest/mysys/my_atomic-t.c | 257
2 files changed, 181 insertions, 84 deletions
diff --git a/unittest/mysys/Makefile.am b/unittest/mysys/Makefile.am
index 54b3d203e10..229e8f0339b 100644
--- a/unittest/mysys/Makefile.am
+++ b/unittest/mysys/Makefile.am
@@ -13,13 +13,11 @@
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-AM_CPPFLAGS = @ZLIB_INCLUDES@ -I$(top_builddir)/include
-AM_CPPFLAGS += -I$(top_srcdir)/include -I$(top_srcdir)/unittest/mytap
+INCLUDES = @ZLIB_INCLUDES@ -I$(top_builddir)/include \
+ -I$(top_srcdir)/include -I$(top_srcdir)/unittest/mytap
+
LDADD = $(top_builddir)/unittest/mytap/libmytap.a \
$(top_builddir)/mysys/libmysys.a \
$(top_builddir)/dbug/libdbug.a \
$(top_builddir)/strings/libmystrings.a
-
-noinst_PROGRAMS = bitmap-t base64-t my_atomic-t
-
diff --git a/unittest/mysys/my_atomic-t.c b/unittest/mysys/my_atomic-t.c
index c4ba7850ae1..d3be33f4163 100644
--- a/unittest/mysys/my_atomic-t.c
+++ b/unittest/mysys/my_atomic-t.c
@@ -13,27 +13,27 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-#include <my_global.h>
#include <tap.h>
+
+#include <my_global.h>
#include <my_sys.h>
#include <my_atomic.h>
+#include <lf.h>
-int32 a32,b32,c32;
+volatile uint32 a32,b32;
+volatile int32 c32, N;
my_atomic_rwlock_t rwl;
-
-pthread_attr_t thr_attr;
-pthread_mutex_t mutex;
-pthread_cond_t cond;
-int N;
+LF_ALLOCATOR lf_allocator;
+LF_HASH lf_hash;
/* add and sub a random number in a loop. Must get 0 at the end */
pthread_handler_t test_atomic_add_handler(void *arg)
{
- int m=*(int *)arg;
+ int m= (*(int *)arg)/2;
int32 x;
- for (x=((int)((long)(&m))); m ; m--)
+ for (x= ((int)(intptr)(&m)); m ; m--)
{
- x=x*m+0x87654321;
+ x= (x*m+0x87654321) & INT_MAX32;
my_atomic_rwlock_wrlock(&rwl);
my_atomic_add32(&a32, x);
my_atomic_rwlock_wrunlock(&rwl);
@@ -42,10 +42,6 @@ pthread_handler_t test_atomic_add_handler(void *arg)
my_atomic_add32(&a32, -x);
my_atomic_rwlock_wrunlock(&rwl);
}
- pthread_mutex_lock(&mutex);
- N--;
- if (!N) pthread_cond_signal(&cond);
- pthread_mutex_unlock(&mutex);
return 0;
}
@@ -57,30 +53,24 @@ pthread_handler_t test_atomic_add_handler(void *arg)
5. subtract result from a32
must get 0 in a32 at the end
*/
-pthread_handler_t test_atomic_swap_handler(void *arg)
+pthread_handler_t test_atomic_fas_handler(void *arg)
{
- int m=*(int *)arg;
- int32 x;
-
- my_atomic_rwlock_wrlock(&rwl);
- x=my_atomic_add32(&b32, 1);
- my_atomic_rwlock_wrunlock(&rwl);
+ int m= *(int *)arg;
+ uint32 x= my_atomic_add32(&b32, 1);
- my_atomic_rwlock_wrlock(&rwl);
my_atomic_add32(&a32, x);
- my_atomic_rwlock_wrunlock(&rwl);
for (; m ; m--)
{
my_atomic_rwlock_wrlock(&rwl);
- x=my_atomic_swap32(&c32, x);
+ x= my_atomic_fas32(&c32, x);
my_atomic_rwlock_wrunlock(&rwl);
}
if (!x)
{
my_atomic_rwlock_wrlock(&rwl);
- x=my_atomic_swap32(&c32, x);
+ x= my_atomic_fas32(&c32, x);
my_atomic_rwlock_wrunlock(&rwl);
}
@@ -88,113 +78,222 @@ pthread_handler_t test_atomic_swap_handler(void *arg)
my_atomic_add32(&a32, -x);
my_atomic_rwlock_wrunlock(&rwl);
- pthread_mutex_lock(&mutex);
- N--;
- if (!N) pthread_cond_signal(&cond);
- pthread_mutex_unlock(&mutex);
return 0;
}
/*
same as test_atomic_add_handler, but my_atomic_add32 is emulated with
- (slower) my_atomic_cas32
+ my_atomic_cas32 - notice that the slowdown is proportional to the
+ number of CPUs
*/
pthread_handler_t test_atomic_cas_handler(void *arg)
{
- int m=*(int *)arg, ok;
- int32 x,y;
- for (x=((int)((long)(&m))); m ; m--)
+ int m= (*(int *)arg)/2, ok= 0;
+ int32 x, y;
+ for (x= ((int)(intptr)(&m)); m ; m--)
{
my_atomic_rwlock_wrlock(&rwl);
- y=my_atomic_load32(&a32);
+ y= my_atomic_load32(&a32);
my_atomic_rwlock_wrunlock(&rwl);
-
- x=x*m+0x87654321;
+ x= (x*m+0x87654321) & INT_MAX32;
do {
my_atomic_rwlock_wrlock(&rwl);
- ok=my_atomic_cas32(&a32, &y, y+x);
+ ok= my_atomic_cas32(&a32, &y, (uint32)y+x);
my_atomic_rwlock_wrunlock(&rwl);
- } while (!ok);
+ } while (!ok) ;
do {
my_atomic_rwlock_wrlock(&rwl);
- ok=my_atomic_cas32(&a32, &y, y-x);
+ ok= my_atomic_cas32(&a32, &y, y-x);
my_atomic_rwlock_wrunlock(&rwl);
- } while (!ok);
+ } while (!ok) ;
}
- pthread_mutex_lock(&mutex);
- N--;
- if (!N) pthread_cond_signal(&cond);
- pthread_mutex_unlock(&mutex);
+ return 0;
+}
+
+/*
+ pin allocator - alloc and release an element in a loop
+*/
+pthread_handler_t test_lf_pinbox(void *arg)
+{
+ int m= *(int *)arg;
+ int32 x= 0;
+ LF_PINS *pins;
+
+ pins= lf_pinbox_get_pins(&lf_allocator.pinbox);
+
+ for (x= ((int)(intptr)(&m)); m ; m--)
+ {
+ lf_pinbox_put_pins(pins);
+ pins= lf_pinbox_get_pins(&lf_allocator.pinbox);
+ }
+ lf_pinbox_put_pins(pins);
+ return 0;
+}
+
+typedef union {
+ int32 data;
+ void *not_used;
+} TLA;
+
+pthread_handler_t test_lf_alloc(void *arg)
+{
+ int m= (*(int *)arg)/2;
+ int32 x,y= 0;
+ LF_PINS *pins;
+
+ pins= lf_alloc_get_pins(&lf_allocator);
+
+ for (x= ((int)(intptr)(&m)); m ; m--)
+ {
+ TLA *node1, *node2;
+ x= (x*m+0x87654321) & INT_MAX32;
+ node1= (TLA *)lf_alloc_new(pins);
+ node1->data= x;
+ y+= node1->data;
+ node1->data= 0;
+ node2= (TLA *)lf_alloc_new(pins);
+ node2->data= x;
+ y-= node2->data;
+ node2->data= 0;
+ lf_alloc_free(pins, node1);
+ lf_alloc_free(pins, node2);
+ }
+ lf_alloc_put_pins(pins);
+ my_atomic_rwlock_wrlock(&rwl);
+ my_atomic_add32(&a32, y);
+
+ if (my_atomic_add32(&N, -1) == 1)
+ {
+ diag("%d mallocs, %d pins in stack",
+ lf_allocator.mallocs, lf_allocator.pinbox.pins_in_stack);
+#ifdef MY_LF_EXTRA_DEBUG
+ a32|= lf_allocator.mallocs - lf_alloc_in_pool(&lf_allocator);
+#endif
+ }
+ my_atomic_rwlock_wrunlock(&rwl);
+ return 0;
+}
+
+#define N_TLH 1000
+pthread_handler_t test_lf_hash(void *arg)
+{
+ int m= (*(int *)arg)/(2*N_TLH);
+ int32 x,y,z,sum= 0, ins= 0;
+ LF_PINS *pins;
+
+ pins= lf_hash_get_pins(&lf_hash);
+
+ for (x= ((int)(intptr)(&m)); m ; m--)
+ {
+ int i;
+ y= x;
+ for (i= 0; i < N_TLH; i++)
+ {
+ x= (x*(m+i)+0x87654321) & INT_MAX32;
+ z= (x<0) ? -x : x;
+ if (lf_hash_insert(&lf_hash, pins, &z))
+ {
+ sum+= z;
+ ins++;
+ }
+ }
+ for (i= 0; i < N_TLH; i++)
+ {
+ y= (y*(m+i)+0x87654321) & INT_MAX32;
+ z= (y<0) ? -y : y;
+ if (lf_hash_delete(&lf_hash, pins, (uchar *)&z, sizeof(z)))
+ sum-= z;
+ }
+ }
+ lf_hash_put_pins(pins);
+ my_atomic_rwlock_wrlock(&rwl);
+ my_atomic_add32(&a32, sum);
+ my_atomic_add32(&b32, ins);
+
+ if (my_atomic_add32(&N, -1) == 1)
+ {
+ diag("%d mallocs, %d pins in stack, %d hash size, %d inserts",
+ lf_hash.alloc.mallocs, lf_hash.alloc.pinbox.pins_in_stack,
+ lf_hash.size, b32);
+ a32|= lf_hash.count;
+ }
+ my_atomic_rwlock_wrunlock(&rwl);
return 0;
}
void test_atomic(const char *test, pthread_handler handler, int n, int m)
{
- pthread_t t;
- ulonglong now=my_getsystime();
+ pthread_t *threads;
+ ulonglong now= my_getsystime();
+ int i;
a32= 0;
b32= 0;
c32= 0;
+ threads= (pthread_t *)my_malloc(sizeof(void *)*n, MYF(0));
+ if (!threads)
+ {
+ diag("Out of memory");
+ abort();
+ }
+
diag("Testing %s with %d threads, %d iterations... ", test, n, m);
- for (N=n ; n ; n--)
+ N= n;
+ for (i= 0 ; i < n ; i++)
{
- if (pthread_create(&t, &thr_attr, handler, &m) != 0)
+ if (pthread_create(threads+i, 0, handler, &m) != 0)
{
diag("Could not create thread");
- a32= 1;
- goto err;
+ abort();
}
}
-
- pthread_mutex_lock(&mutex);
- while (N)
- pthread_cond_wait(&cond, &mutex);
- pthread_mutex_unlock(&mutex);
- now=my_getsystime()-now;
-err:
- ok(a32 == 0, "tested %s in %g secs", test, ((double)now)/1e7);
+ for (i= 0 ; i < n ; i++)
+ pthread_join(threads[i], 0);
+ now= my_getsystime()-now;
+ ok(a32 == 0, "tested %s in %g secs (%d)", test, ((double)now)/1e7, a32);
+ my_free((void *)threads, MYF(0));
}
+
int main()
{
int err;
- diag("N CPUs: %d", my_getncpus());
+ my_init();
+
+ diag("N CPUs: %d, atomic ops: %s", my_getncpus(), MY_ATOMIC_MODE);
err= my_atomic_initialize();
- plan(4);
+ plan(7);
ok(err == 0, "my_atomic_initialize() returned %d", err);
- pthread_attr_init(&thr_attr);
- pthread_attr_setdetachstate(&thr_attr,PTHREAD_CREATE_DETACHED);
- pthread_mutex_init(&mutex, 0);
- pthread_cond_init(&cond, 0);
my_atomic_rwlock_init(&rwl);
+ lf_alloc_init(&lf_allocator, sizeof(TLA), offsetof(TLA, not_used));
+ lf_hash_init(&lf_hash, sizeof(int), LF_HASH_UNIQUE, 0, sizeof(int), 0,
+ &my_charset_bin);
-#ifdef HPUX11
-#define CYCLES 1000
+#ifdef MY_ATOMIC_MODE_RWLOCKS
+#define CYCLES 3000
#else
-#define CYCLES 10000
+#define CYCLES 300000
#endif
#define THREADS 100
- test_atomic("my_atomic_add32", test_atomic_add_handler, THREADS, CYCLES);
- test_atomic("my_atomic_swap32", test_atomic_swap_handler, THREADS, CYCLES);
- test_atomic("my_atomic_cas32", test_atomic_cas_handler, THREADS, CYCLES);
+
+ test_atomic("my_atomic_add32", test_atomic_add_handler, THREADS,CYCLES);
+ test_atomic("my_atomic_fas32", test_atomic_fas_handler, THREADS,CYCLES);
+ test_atomic("my_atomic_cas32", test_atomic_cas_handler, THREADS,CYCLES);
+ test_atomic("lf_pinbox", test_lf_pinbox, THREADS,CYCLES);
+ test_atomic("lf_alloc", test_lf_alloc, THREADS,CYCLES);
+ test_atomic("lf_hash", test_lf_hash, THREADS,CYCLES/10);
+
+ lf_hash_destroy(&lf_hash);
+ lf_alloc_destroy(&lf_allocator);
/* workaround until we know why this includes dbug but not safemalloc */
if(err) { my_thread_global_init(); my_free(my_malloc(0, MYF(0)), MYF(0)); }
- /*
- workaround until we know why it crashes randomly on some machine
- (BUG#22320).
- */
- sleep(2);
-
- pthread_mutex_destroy(&mutex);
- pthread_cond_destroy(&cond);
- pthread_attr_destroy(&thr_attr);
my_atomic_rwlock_destroy(&rwl);
+ my_end(0);
return exit_status();
}
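
For readers unfamiliar with the pattern, test_atomic_cas_handler above emulates my_atomic_add32 with a compare-and-swap retry loop. A minimal standalone sketch of that loop is shown below; it uses C11 &lt;stdatomic.h&gt; instead of the my_atomic_* wrappers so it compiles outside the MySQL tree, and the helper name cas_add32 is purely illustrative, not part of the patch.

/*
  Standalone sketch (not part of the patch above): the CAS loop that
  test_atomic_cas_handler uses to emulate an atomic add, written with
  standard C11 atomics so it is self-contained.
*/
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic int32_t a32 = 0;

/* Emulate an atomic add: read the current value, then retry the
   compare-and-swap until it succeeds, just like the handler's
   do/while (!ok) loop. 'expected' is refreshed on each failure. */
static void cas_add32(_Atomic int32_t *var, int32_t addend)
{
  int32_t expected = atomic_load(var);
  while (!atomic_compare_exchange_weak(var, &expected, expected + addend))
    ;
}

int main(void)
{
  cas_add32(&a32, 5);
  cas_add32(&a32, -5);
  printf("a32 = %d (expect 0)\n", (int)atomic_load(&a32));
  return 0;
}

As the patch's comment notes, this retry loop is what makes the CAS-based test slower than the native add: under contention each failed compare-and-swap forces another iteration, so the slowdown grows with the number of competing CPUs.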