author     Sergei Golubchik <serg@mariadb.org>    2015-01-31 21:48:47 +0100
committer  Sergei Golubchik <serg@mariadb.org>    2015-01-31 21:48:47 +0100
commit     4b21cd21fef2763d757aa15681c9c9a7ed5db3c9 (patch)
tree       e9e233392b47f93de12cecce1f7f403ce26057b0 /mysys
parent     0b049b40124d72d77c008d4441e4db2e77f0f127 (diff)
parent     a06624d61f36c70edd63adcfe2803bb7a8564de5 (diff)
download   mariadb-git-4b21cd21fef2763d757aa15681c9c9a7ed5db3c9.tar.gz
Merge branch '10.0' into merge-wip
Diffstat (limited to 'mysys')
-rw-r--r--  mysys/mf_keycache.c | 59
-rw-r--r--  mysys/my_context.c  | 13
-rw-r--r--  mysys/my_wincond.c  | 24
-rw-r--r--  mysys/thr_lock.c    | 19
4 files changed, 55 insertions(+), 60 deletions(-)
diff --git a/mysys/mf_keycache.c b/mysys/mf_keycache.c
index 5505693ce2c..c0cd1594e72 100644
--- a/mysys/mf_keycache.c
+++ b/mysys/mf_keycache.c
@@ -1020,11 +1020,11 @@ void end_simple_key_cache(SIMPLE_KEY_CACHE_CB *keycache, my_bool cleanup)
*/
static void link_into_queue(KEYCACHE_WQUEUE *wqueue,
- struct st_my_thread_var *thread)
+ struct st_my_thread_var *thread)
{
struct st_my_thread_var *last;
-
DBUG_ASSERT(!thread->next && !thread->prev);
+
if (! (last= wqueue->last_thread))
{
/* Queue is empty */
@@ -1033,10 +1033,15 @@ static void link_into_queue(KEYCACHE_WQUEUE *wqueue,
}
else
{
- thread->prev= last->next->prev;
- last->next->prev= &thread->next;
- thread->next= last->next;
- last->next= thread;
+ DBUG_ASSERT(last->next->prev == &last->next);
+ /* Add backlink to previous element */
+ thread->prev= last->next->prev;
+ /* Fix first in list to point backwards to current */
+ last->next->prev= &thread->next;
+ /* Next should point to the first element in list */
+ thread->next= last->next;
+ /* Fix old element to point to new one */
+ last->next= thread;
}
wqueue->last_thread= thread;
}
@@ -1057,17 +1062,22 @@ static void link_into_queue(KEYCACHE_WQUEUE *wqueue,
*/
static void unlink_from_queue(KEYCACHE_WQUEUE *wqueue,
- struct st_my_thread_var *thread)
+ struct st_my_thread_var *thread)
{
KEYCACHE_DBUG_PRINT("unlink_from_queue", ("thread %ld", thread->id));
DBUG_ASSERT(thread->next && thread->prev);
+
if (thread->next == thread)
+ {
/* The queue contains only one member */
wqueue->last_thread= NULL;
+ }
else
{
+ /* Remove current element from list */
thread->next->prev= thread->prev;
- *thread->prev=thread->next;
+ *thread->prev= thread->next;
+ /* If first element, change list pointer to point to previous element */
if (wqueue->last_thread == thread)
wqueue->last_thread= STRUCT_PTR(struct st_my_thread_var, next,
thread->prev);
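
The two hunks above document an intrusive circular wait queue: each element's prev field stores the address of the pointer that points at it, so unlinking needs no list traversal. The following is a minimal standalone model of that pattern, assuming simplified stand-in types (waiter, wait_queue) rather than the real st_my_thread_var/KEYCACHE_WQUEUE; it is a sketch of the technique, not the MariaDB code itself.

/*
  Minimal model of the queue handled by link_into_queue()/
  unlink_from_queue() above.  Names and types are illustrative.
*/
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct waiter
{
  struct waiter *next;       /* next waiter; the list is circular        */
  struct waiter **prev;      /* address of the pointer pointing at us    */
  int id;
};

struct wait_queue
{
  struct waiter *last;       /* newest element; last->next is the oldest */
};

static void queue_link(struct wait_queue *q, struct waiter *w)
{
  struct waiter *last= q->last;
  assert(!w->next && !w->prev);
  if (!last)
  {
    /* Empty queue: the single element points back at itself */
    w->next= w;
    w->prev= &w->next;
  }
  else
  {
    assert(last->next->prev == &last->next);
    w->prev= last->next->prev;     /* back-link to the previous element  */
    last->next->prev= &w->next;    /* first element now back-links to us */
    w->next= last->next;           /* we point at the first element      */
    last->next= w;                 /* old last element points at us      */
  }
  q->last= w;
}

static void queue_unlink(struct wait_queue *q, struct waiter *w)
{
  assert(w->next && w->prev);
  if (w->next == w)
    q->last= NULL;                 /* w was the only element             */
  else
  {
    w->next->prev= w->prev;        /* successor back-links past w        */
    *w->prev= w->next;             /* predecessor's next pointer skips w */
    if (q->last == w)              /* removing the newest element:
                                      recover its predecessor from the
                                      back-link, as STRUCT_PTR does      */
      q->last= (struct waiter *) ((char *) w->prev -
                                  offsetof(struct waiter, next));
  }
  w->next= NULL;
  w->prev= NULL;
}

int main(void)
{
  struct wait_queue q= { NULL };
  struct waiter a= { NULL, NULL, 1 };
  struct waiter b= { NULL, NULL, 2 };
  queue_link(&q, &a);
  queue_link(&q, &b);
  queue_unlink(&q, &a);            /* the oldest waiter leaves first     */
  printf("remaining waiter: %d\n", q.last->id);
  queue_unlink(&q, &b);
  return 0;
}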
@@ -1111,10 +1121,10 @@ static void wait_on_queue(KEYCACHE_WQUEUE *wqueue,
{
struct st_my_thread_var *last;
struct st_my_thread_var *thread= my_thread_var;
-
- /* Add to queue. */
DBUG_ASSERT(!thread->next);
DBUG_ASSERT(!thread->prev); /* Not required, but must be true anyway. */
+
+ /* Add to queue. */
if (! (last= wqueue->last_thread))
thread->next= thread;
else
@@ -1125,7 +1135,7 @@ static void wait_on_queue(KEYCACHE_WQUEUE *wqueue,
wqueue->last_thread= thread;
/*
- Wait until thread is removed from queue by the signalling thread.
+ Wait until thread is removed from queue by the signaling thread.
The loop protects against stray signals.
*/
do
@@ -1163,10 +1173,11 @@ static void release_whole_queue(KEYCACHE_WQUEUE *wqueue)
if (!(last= wqueue->last_thread))
return;
- next= last->next;
+ next= last->next; /* First (oldest) element */
do
{
thread=next;
+ DBUG_ASSERT(thread);
KEYCACHE_DBUG_PRINT("release_whole_queue: signal",
("thread %ld", thread->id));
/* Signal the thread. */
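
wait_on_queue() and release_whole_queue() above implement a park/unpark protocol: the waiter sleeps on its own condition variable and keeps waiting while it is still linked, and the releaser unlinks it under the cache lock before signalling, so stray or spurious wakeups are harmless. A minimal sketch of that protocol follows, assuming a single waiter and a queued flag in place of thread->next; the names (waiter, cache_lock) are illustrative, not the key cache's own.

/* Sketch of the wait/release protocol; build with -pthread. */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

struct waiter
{
  pthread_cond_t suspend;   /* per-thread condvar, like my_thread_var->suspend */
  int queued;               /* stands in for "thread->next != NULL"            */
};

static pthread_mutex_t cache_lock= PTHREAD_MUTEX_INITIALIZER;
static struct waiter w= { PTHREAD_COND_INITIALIZER, 0 };

/* Waiter side: enqueue, then sleep until the releaser has dequeued us. */
static void *wait_on_queue(void *arg)
{
  (void) arg;
  pthread_mutex_lock(&cache_lock);
  w.queued= 1;                               /* "link into queue"             */
  do
  {
    /* Releases cache_lock while sleeping, re-acquires it on wakeup */
    pthread_cond_wait(&w.suspend, &cache_lock);
  } while (w.queued);                        /* stray signals loop back here  */
  pthread_mutex_unlock(&cache_lock);
  puts("waiter released");
  return NULL;
}

/* Releaser side: dequeue the waiter under the mutex, then signal it. */
int main(void)
{
  pthread_t t;
  pthread_create(&t, NULL, wait_on_queue, NULL);

  pthread_mutex_lock(&cache_lock);
  while (!w.queued)                          /* wait until the waiter linked  */
  {
    pthread_mutex_unlock(&cache_lock);
    usleep(1000);
    pthread_mutex_lock(&cache_lock);
  }
  w.queued= 0;                               /* "unlink from queue"           */
  pthread_cond_signal(&w.suspend);
  pthread_mutex_unlock(&cache_lock);

  pthread_join(t, NULL);
  return 0;
}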
@@ -1359,7 +1370,7 @@ static void link_block(SIMPLE_KEY_CACHE_CB *keycache, BLOCK_LINK *block,
keycache->waiting_for_block.last_thread;
struct st_my_thread_var *first_thread= last_thread->next;
struct st_my_thread_var *next_thread= first_thread;
- HASH_LINK *hash_link= (HASH_LINK *) first_thread->opt_info;
+ HASH_LINK *hash_link= (HASH_LINK *) first_thread->keycache_link;
struct st_my_thread_var *thread;
do
{
@@ -1369,7 +1380,7 @@ static void link_block(SIMPLE_KEY_CACHE_CB *keycache, BLOCK_LINK *block,
We notify about the event all threads that ask
for the same page as the first thread in the queue
*/
- if ((HASH_LINK *) thread->opt_info == hash_link)
+ if ((HASH_LINK *) thread->keycache_link == hash_link)
{
KEYCACHE_DBUG_PRINT("link_block: signal", ("thread %ld", thread->id));
keycache_pthread_cond_signal(&thread->suspend);
@@ -1703,7 +1714,7 @@ static void unlink_hash(SIMPLE_KEY_CACHE_CB *keycache, HASH_LINK *hash_link)
keycache->waiting_for_hash_link.last_thread;
struct st_my_thread_var *first_thread= last_thread->next;
struct st_my_thread_var *next_thread= first_thread;
- KEYCACHE_PAGE *first_page= (KEYCACHE_PAGE *) (first_thread->opt_info);
+ KEYCACHE_PAGE *first_page= (KEYCACHE_PAGE *) (first_thread->keycache_link);
struct st_my_thread_var *thread;
hash_link->file= first_page->file;
@@ -1712,7 +1723,7 @@ static void unlink_hash(SIMPLE_KEY_CACHE_CB *keycache, HASH_LINK *hash_link)
{
KEYCACHE_PAGE *page;
thread= next_thread;
- page= (KEYCACHE_PAGE *) thread->opt_info;
+ page= (KEYCACHE_PAGE *) thread->keycache_link;
next_thread= thread->next;
/*
We notify about the event all threads that ask
@@ -1801,13 +1812,13 @@ restart:
KEYCACHE_DBUG_PRINT("get_hash_link", ("waiting"));
page.file= file;
page.filepos= filepos;
- thread->opt_info= (void *) &page;
+ thread->keycache_link= (void *) &page;
link_into_queue(&keycache->waiting_for_hash_link, thread);
KEYCACHE_DBUG_PRINT("get_hash_link: wait",
("suspend thread %ld", thread->id));
keycache_pthread_cond_wait(&thread->suspend,
&keycache->cache_lock);
- thread->opt_info= NULL;
+ thread->keycache_link= NULL;
goto restart;
}
hash_link->file= file;
@@ -1965,7 +1976,7 @@ restart:
for another file/pos.
*/
thread= my_thread_var;
- thread->opt_info= (void *) hash_link;
+ thread->keycache_link= (void *) hash_link;
link_into_queue(&keycache->waiting_for_block, thread);
do
{
@@ -1974,7 +1985,7 @@ restart:
keycache_pthread_cond_wait(&thread->suspend,
&keycache->cache_lock);
} while (thread->next);
- thread->opt_info= NULL;
+ thread->keycache_link= NULL;
/*
A block should now be assigned to the hash_link. But it may
still need to be evicted. Anyway, we should re-check the
@@ -2312,7 +2323,7 @@ restart:
*/
struct st_my_thread_var *thread= my_thread_var;
- thread->opt_info= (void *) hash_link;
+ thread->keycache_link= (void *) hash_link;
link_into_queue(&keycache->waiting_for_block, thread);
do
{
@@ -2322,7 +2333,7 @@ restart:
&keycache->cache_lock);
}
while (thread->next);
- thread->opt_info= NULL;
+ thread->keycache_link= NULL;
/* Assert that block has a request registered. */
DBUG_ASSERT(hash_link->block->requests);
/* Assert that block is not in LRU ring. */
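
The opt_info to keycache_link rename above supports a selective wakeup: a waiter parks a pointer to the page or hash link it wants in its thread variable, and link_block()/unlink_hash() walk the circular queue and signal only the waiters whose pointer matches. Below is a small, hedged model of that traversal, with printf standing in for keycache_pthread_cond_signal and reduced stand-in types; the real code also unlinks the matched waiters, which is omitted here.

#include <stdio.h>

struct waiter
{
  struct waiter *next;          /* circular list, as in the queue sketch above */
  const void *keycache_link;    /* what this waiter is waiting for             */
  int id;
};

/* Wake every waiter in the circular queue whose link matches `link'. */
static void wake_matching(struct waiter *last, const void *link)
{
  struct waiter *first= last->next;          /* oldest waiter */
  struct waiter *thread= first;
  do
  {
    struct waiter *next= thread->next;       /* read before any unlinking      */
    if (thread->keycache_link == link)
      printf("signal waiter %d\n", thread->id);  /* cond_signal in the source  */
    thread= next;
  } while (thread != first);
}

int main(void)
{
  int page_a, page_b;                        /* stand-ins for two hash links   */
  struct waiter w1= { NULL, &page_a, 1 };
  struct waiter w2= { NULL, &page_b, 2 };
  struct waiter w3= { NULL, &page_a, 3 };
  /* Build the circular list w1 -> w2 -> w3 -> w1, with w3 as "last" */
  w1.next= &w2;
  w2.next= &w3;
  w3.next= &w1;
  wake_matching(&w3, &page_a);               /* wakes waiters 1 and 3 only     */
  return 0;
}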
@@ -4577,7 +4588,7 @@ static void keycache_dump(SIMPLE_KEY_CACHE_CB *keycache)
do
{
thread=thread->next;
- page= (KEYCACHE_PAGE *) thread->opt_info;
+ page= (KEYCACHE_PAGE *) thread->keycache_link;
fprintf(keycache_dump_file,
"thread:%u, (file,filepos)=(%u,%lu)\n",
thread->id,(uint) page->file,(ulong) page->filepos);
@@ -4593,7 +4604,7 @@ static void keycache_dump(SIMPLE_KEY_CACHE_CB *keycache)
do
{
thread=thread->next;
- hash_link= (HASH_LINK *) thread->opt_info;
+ hash_link= (HASH_LINK *) thread->keycache_link;
fprintf(keycache_dump_file,
"thread:%u hash_link:%u (file,filepos)=(%u,%lu)\n",
thread->id, (uint) HASH_LINK_NUMBER(hash_link),
diff --git a/mysys/my_context.c b/mysys/my_context.c
index 4d9f1a1a12f..60c0014b3b9 100644
--- a/mysys/my_context.c
+++ b/mysys/my_context.c
@@ -727,33 +727,36 @@ my_context_continue(struct my_context *c)
#ifdef MY_CONTEXT_DISABLE
int
-my_context_continue(struct my_context *c)
+my_context_continue(struct my_context *c __attribute__((unused)))
{
return -1;
}
int
-my_context_spawn(struct my_context *c, void (*f)(void *), void *d)
+my_context_spawn(struct my_context *c __attribute__((unused)),
+ void (*f)(void *) __attribute__((unused)),
+ void *d __attribute__((unused)))
{
return -1;
}
int
-my_context_yield(struct my_context *c)
+my_context_yield(struct my_context *c __attribute__((unused)))
{
return -1;
}
int
-my_context_init(struct my_context *c, size_t stack_size)
+my_context_init(struct my_context *c __attribute__((unused)),
+ size_t stack_size __attribute__((unused)))
{
return -1; /* Out of memory */
}
void
-my_context_destroy(struct my_context *c)
+my_context_destroy(struct my_context *c __attribute__((unused)))
{
}
diff --git a/mysys/my_wincond.c b/mysys/my_wincond.c
index 6674a5d394d..c761064dd96 100644
--- a/mysys/my_wincond.c
+++ b/mysys/my_wincond.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+/* Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -328,26 +328,4 @@ int pthread_attr_destroy(pthread_attr_t *connect_att)
return 0;
}
-/****************************************************************************
-** Fix localtime_r() to be a bit safer
-****************************************************************************/
-
-struct tm *localtime_r(const time_t *timep,struct tm *tmp)
-{
- if (*timep == (time_t) -1) /* This will crash win32 */
- {
- bzero(tmp,sizeof(*tmp));
- }
- else
- {
- struct tm *res=localtime(timep);
- if (!res) /* Wrong date */
- {
- bzero(tmp,sizeof(*tmp)); /* Keep things safe */
- return 0;
- }
- *tmp= *res;
- }
- return tmp;
-}
#endif /* __WIN__ */
diff --git a/mysys/thr_lock.c b/mysys/thr_lock.c
index bb16ba92088..37dad48396a 100644
--- a/mysys/thr_lock.c
+++ b/mysys/thr_lock.c
@@ -343,7 +343,7 @@ static void check_locks(THR_LOCK *lock, const char *where,
found_errors++;
fprintf(stderr,
"Warning at '%s': Write lock %d waiting while no exclusive read locks\n",where,(int) lock->write_wait.data->type);
- DBUG_PRINT("warning", ("Warning at '%s': Write lock %d waiting while no exclusive read locks\n",where,(int) lock->write_wait.data->type));
+ DBUG_PRINT("warning", ("Warning at '%s': Write lock %d waiting while no exclusive read locks",where,(int) lock->write_wait.data->type));
}
}
}
@@ -363,7 +363,7 @@ static void check_locks(THR_LOCK *lock, const char *where,
fprintf(stderr,
"Warning at '%s': Found TL_WRITE_CONCURRENT_INSERT lock mixed with other write lock: %d\n",
where, data->type);
- DBUG_PRINT("warning", ("Warning at '%s': Found TL_WRITE_CONCURRENT_INSERT lock mixed with other write lock: %d\n",
+ DBUG_PRINT("warning", ("Warning at '%s': Found TL_WRITE_CONCURRENT_INSERT lock mixed with other write lock: %d",
where, data->type));
break;
}
@@ -379,7 +379,7 @@ static void check_locks(THR_LOCK *lock, const char *where,
fprintf(stderr,
"Warning at '%s': Found WRITE_ALLOW_WRITE lock waiting for WRITE_ALLOW_WRITE lock\n",
where);
- DBUG_PRINT("warning", ("Warning at '%s': Found WRITE_ALLOW_WRITE lock waiting for WRITE_ALLOW_WRITE lock\n",
+ DBUG_PRINT("warning", ("Warning at '%s': Found WRITE_ALLOW_WRITE lock waiting for WRITE_ALLOW_WRITE lock",
where));
}
@@ -402,7 +402,7 @@ static void check_locks(THR_LOCK *lock, const char *where,
"Warning at '%s' for lock: %d: Found lock of type %d that is write and read locked. Read_no_write_count: %d\n",
where, (int) type, lock->write.data->type,
lock->read_no_write_count);
- DBUG_PRINT("warning",("At '%s' for lock %d: Found lock of type %d that is write and read locked\n",
+ DBUG_PRINT("warning",("At '%s' for lock %d: Found lock of type %d that is write and read locked",
where, (int) type,
lock->write.data->type));
}
@@ -914,7 +914,8 @@ thr_lock(THR_LOCK_DATA *data, THR_LOCK_INFO *owner, ulong lock_wait_timeout)
The idea is to allow us to get a lock at once if we already have
a write lock or if there is no pending write locks and if all
write locks are of the same type and are either
- TL_WRITE_ALLOW_WRITE or TL_WRITE_CONCURRENT_INSERT
+ TL_WRITE_ALLOW_WRITE or TL_WRITE_CONCURRENT_INSERT and
+ there is no TL_READ_NO_INSERT lock.
Note that, since lock requests for the same table are sorted in
such way that requests with higher thr_lock_type value come first
@@ -931,7 +932,7 @@ thr_lock(THR_LOCK_DATA *data, THR_LOCK_INFO *owner, ulong lock_wait_timeout)
situation.
**) The exceptions are situations when:
- when old lock type is TL_WRITE_DELAYED
- But these should never happen within MySQL.
+ But these should never happen within MariaDB.
Therefore it is OK to allow acquiring write lock on the table if
this thread already holds some write lock on it.
@@ -947,9 +948,11 @@ thr_lock(THR_LOCK_DATA *data, THR_LOCK_INFO *owner, ulong lock_wait_timeout)
if (((lock_type == TL_WRITE_ALLOW_WRITE ||
(lock_type == TL_WRITE_CONCURRENT_INSERT &&
- lock->allow_multiple_concurrent_insert)) &&
+ lock->allow_multiple_concurrent_insert &&
+ !lock->read_no_write_count)) &&
! lock->write_wait.data &&
- lock->write.data->type == lock_type) ||
+ lock->write.data->type == lock_type &&
+ ! lock->read_no_write_count) ||
has_old_lock(lock->write.data, data->owner))
{
DBUG_PRINT("info", ("write_wait.data: 0x%lx old_type: %d",