Diffstat (limited to 'sql/mdl.cc')
-rw-r--r--  sql/mdl.cc  1603
1 file changed, 1051 insertions, 552 deletions
diff --git a/sql/mdl.cc b/sql/mdl.cc
index af7f310e598..dce917a1a2e 100644
--- a/sql/mdl.cc
+++ b/sql/mdl.cc
@@ -21,11 +21,40 @@
static bool mdl_initialized= 0;
+
+/**
+ A collection of all MDL locks. A singleton,
+ there is only one instance of the map in the server.
+ Maps MDL_key to MDL_lock instances.
+*/
+
+class MDL_map
+{
+public:
+ void init();
+ void destroy();
+ MDL_lock *find(const MDL_key *key);
+ MDL_lock *find_or_insert(const MDL_key *key);
+ void remove(MDL_lock *lock);
+private:
+ bool move_from_hash_to_lock_mutex(MDL_lock *lock);
+private:
+ /** All acquired locks in the server. */
+ HASH m_locks;
+ /* Protects access to m_locks hash. */
+ pthread_mutex_t m_mutex;
+};
+
+
/**
The lock context. Created internally for an acquired lock.
For a given name, there exists only one MDL_lock instance,
and it exists only when the lock has been granted.
Can be seen as an MDL subsystem's version of TABLE_SHARE.
+
+ This is an abstract class which lacks information about
+ compatibility rules for lock types. They should be specified
+ in its descendants.
*/
class MDL_lock
@@ -39,78 +68,128 @@ public:
typedef Ticket_list::Iterator Ticket_iterator;
- /** The type of lock (shared or exclusive). */
- enum
- {
- MDL_LOCK_SHARED,
- MDL_LOCK_EXCLUSIVE,
- } type;
+public:
/** The key of the object (data) being protected. */
MDL_key key;
/** List of granted tickets for this lock. */
Ticket_list granted;
+ /** Tickets for contexts waiting to acquire a shared lock. */
+ Ticket_list waiting_shared;
/**
+ Tickets for contexts waiting to acquire an exclusive lock.
There can be several upgraders and active exclusive
locks belonging to the same context. E.g.
in case of RENAME t1 to t2, t2 to t3, we attempt to
exclusively lock t2 twice.
*/
- Ticket_list waiting;
+ Ticket_list waiting_exclusive;
void *cached_object;
mdl_cached_object_release_hook cached_object_release_hook;
+ /** Mutex protecting this lock context. */
+ pthread_mutex_t m_mutex;
bool is_empty() const
{
- return (granted.is_empty() && waiting.is_empty());
+ return (granted.is_empty() && waiting_shared.is_empty() &&
+ waiting_exclusive.is_empty());
}
- bool can_grant_lock(const MDL_context *requestor_ctx,
- enum_mdl_type type, bool is_upgrade);
+ bool has_pending_exclusive_lock()
+ {
+ bool has_locks;
+ pthread_mutex_lock(&m_mutex);
+ has_locks= ! waiting_exclusive.is_empty();
+ pthread_mutex_unlock(&m_mutex);
+ return has_locks;
+ }
+ virtual bool can_grant_lock(const MDL_context *requestor_ctx,
+ enum_mdl_type type, bool is_upgrade)= 0;
+ virtual void wake_up_waiters()= 0;
inline static MDL_lock *create(const MDL_key *key);
- inline static void destroy(MDL_lock *lock);
-private:
+
MDL_lock(const MDL_key *key_arg)
- : type(MDL_LOCK_SHARED),
- key(key_arg),
+ : key(key_arg),
cached_object(NULL),
- cached_object_release_hook(NULL)
+ cached_object_release_hook(NULL),
+ m_ref_usage(0),
+ m_ref_release(0),
+ m_is_destroyed(FALSE)
{
+ pthread_mutex_init(&m_mutex, NULL);
}
-};
+ virtual ~MDL_lock()
+ {
+ pthread_mutex_destroy(&m_mutex);
+ }
+ inline static void destroy(MDL_lock *lock);
+public:
+ /**
+ These three members are used to make it possible to separate
+ the mdl_locks.m_mutex mutex and MDL_lock::m_mutex in
+ MDL_map::find_or_insert() for increased scalability.
+ The 'm_is_destroyed' member is only set by destroyers that
+ have both the mdl_locks.m_mutex and MDL_lock::m_mutex, thus
+ holding either of the mutexes is sufficient to read it.
+ The 'm_ref_usage' is incremented under protection by
+ mdl_locks.m_mutex, but when 'm_is_destroyed' is set to TRUE, this
+ member is moved to be protected by the MDL_lock::m_mutex.
+ This means that the MDL_map::find_or_insert() which only
+ holds the MDL_lock::m_mutex can compare it to 'm_ref_release'
+ without acquiring mdl_locks.m_mutex again and if equal it can also
+ destroy the lock object safely.
+ The 'm_ref_release' is incremented under protection by
+ MDL_lock::m_mutex.
+ Note that since we are only interested in equality of these two
+ counters we don't have to worry about overflows as long as
+ their size is big enough to hold the maximum number of concurrent
+ threads on the system.
+ */
+ uint m_ref_usage;
+ uint m_ref_release;
+ bool m_is_destroyed;
+};
-static pthread_mutex_t LOCK_mdl;
-static pthread_cond_t COND_mdl;
-static HASH mdl_locks;
/**
- An implementation of the global metadata lock. The only
- locking modes which are supported at the moment are SHARED and
- INTENTION EXCLUSIVE. Note, that SHARED global metadata lock
- is acquired automatically when one tries to acquire an EXCLUSIVE
- or UPGRADABLE SHARED metadata lock on an individual object.
+ An implementation of the global metadata lock. The only locking modes
+ which are supported at the moment are SHARED and INTENTION EXCLUSIVE.
*/
-class MDL_global_lock
+class MDL_global_lock : public MDL_lock
{
public:
- uint waiting_shared;
- uint active_shared;
- uint active_intention_exclusive;
+ MDL_global_lock(const MDL_key *key_arg)
+ : MDL_lock(key_arg)
+ { }
- bool is_empty() const
- {
- return (waiting_shared == 0 && active_shared == 0 &&
- active_intention_exclusive == 0);
- }
- bool is_lock_type_compatible(enum_mdl_type type, bool is_upgrade) const;
+ virtual bool can_grant_lock(const MDL_context *requestor_ctx,
+ enum_mdl_type type, bool is_upgrade);
+ virtual void wake_up_waiters();
};
-static MDL_global_lock global_lock;
+/**
+ An implementation of a per-object lock. Supports SHARED, SHARED_UPGRADABLE,
+ SHARED HIGH PRIORITY and EXCLUSIVE locks.
+*/
+
+class MDL_object_lock : public MDL_lock
+{
+public:
+ MDL_object_lock(const MDL_key *key_arg)
+ : MDL_lock(key_arg)
+ { }
+
+ virtual bool can_grant_lock(const MDL_context *requestor_ctx,
+ enum_mdl_type type, bool is_upgrade);
+ virtual void wake_up_waiters();
+};
+static MDL_map mdl_locks;
+
extern "C"
{
static uchar *
@@ -147,12 +226,7 @@ void mdl_init()
{
DBUG_ASSERT(! mdl_initialized);
mdl_initialized= TRUE;
- pthread_mutex_init(&LOCK_mdl, NULL);
- pthread_cond_init(&COND_mdl, NULL);
- my_hash_init(&mdl_locks, &my_charset_bin, 16 /* FIXME */, 0, 0,
- mdl_locks_key, 0, 0);
- /* The global lock is zero-initialized by the loader. */
- DBUG_ASSERT(global_lock.is_empty());
+ mdl_locks.init();
}
@@ -168,35 +242,199 @@ void mdl_destroy()
if (mdl_initialized)
{
mdl_initialized= FALSE;
- DBUG_ASSERT(!mdl_locks.records);
- DBUG_ASSERT(global_lock.is_empty());
- pthread_mutex_destroy(&LOCK_mdl);
- pthread_cond_destroy(&COND_mdl);
- my_hash_free(&mdl_locks);
+ mdl_locks.destroy();
+ }
+}
+
+
+/** Initialize the global hash containing all MDL locks. */
+
+void MDL_map::init()
+{
+ pthread_mutex_init(&m_mutex, NULL);
+ my_hash_init(&m_locks, &my_charset_bin, 16 /* FIXME */, 0, 0,
+ mdl_locks_key, 0, 0);
+}
+
+
+/**
+ Destroy the global hash containing all MDL locks.
+ @pre It must be empty.
+*/
+
+void MDL_map::destroy()
+{
+ DBUG_ASSERT(!m_locks.records);
+ pthread_mutex_destroy(&m_mutex);
+ my_hash_free(&m_locks);
+}
+
+
+/**
+ Find MDL_lock object corresponding to the key, create it
+ if it does not exist.
+
+ @retval non-NULL - Success. MDL_lock instance for the key with
+ locked MDL_lock::m_mutex.
+ @retval NULL - Failure (OOM).
+*/
+
+MDL_lock* MDL_map::find_or_insert(const MDL_key *mdl_key)
+{
+ MDL_lock *lock;
+
+retry:
+ pthread_mutex_lock(&m_mutex);
+ if (!(lock= (MDL_lock*) my_hash_search(&m_locks,
+ mdl_key->ptr(),
+ mdl_key->length())))
+ {
+ lock= MDL_lock::create(mdl_key);
+ if (!lock || my_hash_insert(&m_locks, (uchar*)lock))
+ {
+ pthread_mutex_unlock(&m_mutex);
+ MDL_lock::destroy(lock);
+ return NULL;
+ }
}
+
+ if (move_from_hash_to_lock_mutex(lock))
+ goto retry;
+
+ return lock;
}
/**
- Initialize a metadata locking context.
+ Find MDL_lock object corresponding to the key.
- This is to be called when a new server connection is created.
+ @retval non-NULL - MDL_lock instance for the key with locked
+ MDL_lock::m_mutex.
+ @retval NULL - There was no MDL_lock for the key.
*/
-void MDL_context::init(THD *thd_arg)
+MDL_lock* MDL_map::find(const MDL_key *mdl_key)
{
- m_has_global_shared_lock= FALSE;
- m_thd= thd_arg;
- m_lt_or_ha_sentinel= NULL;
+ MDL_lock *lock;
+
+retry:
+ pthread_mutex_lock(&m_mutex);
+ if (!(lock= (MDL_lock*) my_hash_search(&m_locks,
+ mdl_key->ptr(),
+ mdl_key->length())))
+ {
+ pthread_mutex_unlock(&m_mutex);
+ return NULL;
+ }
+
+ if (move_from_hash_to_lock_mutex(lock))
+ goto retry;
+
+ return lock;
+}
+
+
+/**
+ Release mdl_locks.m_mutex mutex and lock MDL_lock::m_mutex for lock
+ object from the hash. Handle situation when object was released
+ while we held no mutex.
+
+ @retval FALSE - Success.
+ @retval TRUE - Object was released while we held no mutex, caller
+ should re-try looking up MDL_lock object in the hash.
+*/
+
+bool MDL_map::move_from_hash_to_lock_mutex(MDL_lock *lock)
+{
+ DBUG_ASSERT(! lock->m_is_destroyed);
+ safe_mutex_assert_owner(&m_mutex);
+
+ /*
+ We increment m_ref_usage which is a reference counter protected by
+ mdl_locks.m_mutex under the condition it is present in the hash and
+ m_is_destroyed is FALSE.
+ */
+ lock->m_ref_usage++;
+ pthread_mutex_unlock(&m_mutex);
+
+ pthread_mutex_lock(&lock->m_mutex);
+ lock->m_ref_release++;
+ if (unlikely(lock->m_is_destroyed))
+ {
+ /*
+ Object was released while we held no mutex; we need to
+ release it if no one else holds a reference to it, while our own
+ reference count ensured that the object itself has not had
+ its memory released yet. We can also safely compare
+ m_ref_usage and m_ref_release since the object is no longer
+ present in the hash so no one will be able to find it and
+ increment m_ref_usage anymore.
+ */
+ uint ref_usage= lock->m_ref_usage;
+ uint ref_release= lock->m_ref_release;
+ pthread_mutex_unlock(&lock->m_mutex);
+ if (ref_usage == ref_release)
+ MDL_lock::destroy(lock);
+ return TRUE;
+ }
+ return FALSE;
+}
+
+
+/**
+ Destroy the MDL_lock object or delegate this responsibility to
+ the thread that holds the last outstanding reference to
+ it.
+*/
+
+void MDL_map::remove(MDL_lock *lock)
+{
+ uint ref_usage, ref_release;
+
+ safe_mutex_assert_owner(&lock->m_mutex);
+
+ if (lock->cached_object)
+ (*lock->cached_object_release_hook)(lock->cached_object);
+
/*
- FIXME: In reset_n_backup_open_tables_state,
- we abuse "init" as a reset, i.e. call it on an already
- constructed non-empty object. This is why we can't
- rely here on the default constructors of I_P_List
- to empty the list.
+ Destroy the MDL_lock object, unless someone is still holding
+ a reference to it; in that case the last reference holder is
+ responsible for destroying it.
+
+ Setting of m_is_destroyed to TRUE while holding _both_
+ mdl_locks.m_mutex and MDL_lock::m_mutex mutexes transfers the
+ protection of m_ref_usage from mdl_locks.m_mutex to
+ MDL_lock::m_mutex, while removal of the object from the hash makes
+ it read-only. Therefore whoever acquires MDL_lock::m_mutex next
+ will see the most up-to-date value of m_ref_usage.
+
+ This means that when m_is_destroyed is TRUE and we hold the
+ MDL_lock::m_mutex we can safely read the m_ref_usage
+ member.
*/
- m_tickets.empty();
- m_is_waiting_in_mdl= FALSE;
+ pthread_mutex_lock(&m_mutex);
+ my_hash_delete(&m_locks, (uchar*) lock);
+ lock->m_is_destroyed= TRUE;
+ ref_usage= lock->m_ref_usage;
+ ref_release= lock->m_ref_release;
+ pthread_mutex_unlock(&lock->m_mutex);
+ pthread_mutex_unlock(&m_mutex);
+ if (ref_usage == ref_release)
+ MDL_lock::destroy(lock);
+}
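
The reference counting described in the MDL_lock comment above and implemented by MDL_map::find(), MDL_map::find_or_insert() and MDL_map::remove() is easier to see outside the diff. Below is a minimal, self-contained model of the same two-mutex scheme using plain pthreads; the names (Map, Node, find, remove) are hypothetical and the code is an illustrative sketch only, not part of the patch.

/* Illustrative sketch only -- not part of the patch. */
#include <pthread.h>
#include <map>
#include <string>

struct Node
{
  pthread_mutex_t mutex;
  unsigned int ref_usage;        /* incremented under the map mutex  */
  unsigned int ref_release;      /* incremented under the node mutex */
  bool is_destroyed;
  Node() : ref_usage(0), ref_release(0), is_destroyed(false)
  { pthread_mutex_init(&mutex, NULL); }
  ~Node() { pthread_mutex_destroy(&mutex); }
};

struct Map
{
  pthread_mutex_t mutex;
  std::map<std::string, Node*> hash;
  Map() { pthread_mutex_init(&mutex, NULL); }

  /*
    Look the node up and return it with node->mutex held, retrying if
    it was destroyed in the window where we held no mutex at all.
    Note that find() never holds both mutexes at once, which is why
    remove() below may take them in the opposite order without risk
    of a lock-order deadlock.
  */
  Node *find(const std::string &key)
  {
  retry:
    pthread_mutex_lock(&mutex);
    std::map<std::string, Node*>::iterator it= hash.find(key);
    if (it == hash.end())
    {
      pthread_mutex_unlock(&mutex);
      return NULL;
    }
    Node *node= it->second;
    node->ref_usage++;                 /* still protected by the map mutex */
    pthread_mutex_unlock(&mutex);

    pthread_mutex_lock(&node->mutex);
    node->ref_release++;
    if (node->is_destroyed)
    {
      /* remove() ran while we held no mutex. */
      bool is_last= (node->ref_usage == node->ref_release);
      pthread_mutex_unlock(&node->mutex);
      if (is_last)
        delete node;                   /* last reference frees the memory */
      goto retry;
    }
    return node;                       /* caller unlocks node->mutex */
  }

  /*
    Caller holds node->mutex. Both mutexes are held while flipping
    is_destroyed, so ref_usage becomes read-only afterwards and the
    counters can be compared safely.
  */
  void remove(Node *node, const std::string &key)
  {
    pthread_mutex_lock(&mutex);
    hash.erase(key);
    node->is_destroyed= true;
    bool is_last= (node->ref_usage == node->ref_release);
    pthread_mutex_unlock(&node->mutex);
    pthread_mutex_unlock(&mutex);
    if (is_last)
      delete node;
  }
};
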
+
+
+/**
+ Initialize a metadata locking context.
+
+ This is to be called when a new server connection is created.
+*/
+
+MDL_context::MDL_context()
+ :m_lt_or_ha_sentinel(NULL),
+ m_thd(NULL)
+{
+ pthread_cond_init(&m_ctx_wakeup_cond, NULL);
}
@@ -215,7 +453,7 @@ void MDL_context::init(THD *thd_arg)
void MDL_context::destroy()
{
DBUG_ASSERT(m_tickets.is_empty());
- DBUG_ASSERT(! m_has_global_shared_lock);
+ pthread_cond_destroy(&m_ctx_wakeup_cond);
}
@@ -305,13 +543,21 @@ MDL_request::create(MDL_key::enum_mdl_namespace mdl_namespace, const char *db,
/**
Auxiliary functions needed for creation/destruction of MDL_lock objects.
+ @note Also chooses an MDL_lock descendant appropriate for the object namespace.
+
@todo This naive implementation should be replaced with one that saves
on memory allocation by reusing released objects.
*/
inline MDL_lock *MDL_lock::create(const MDL_key *mdl_key)
{
- return new MDL_lock(mdl_key);
+ switch (mdl_key->mdl_namespace())
+ {
+ case MDL_key::GLOBAL:
+ return new MDL_global_lock(mdl_key);
+ default:
+ return new MDL_object_lock(mdl_key);
+ }
}
@@ -321,6 +567,8 @@ void MDL_lock::destroy(MDL_lock *lock)
}
+
+
/**
Auxiliary functions needed for creation/destruction of MDL_ticket
objects.
@@ -354,18 +602,21 @@ void MDL_ticket::destroy(MDL_ticket *ticket)
will probably introduce too much overhead.
*/
-#define MDL_ENTER_COND(A, B) mdl_enter_cond(A, B, __func__, __FILE__, __LINE__)
+#define MDL_ENTER_COND(A, B, C, D) \
+ mdl_enter_cond(A, B, C, D, __func__, __FILE__, __LINE__)
static inline const char *mdl_enter_cond(THD *thd,
st_my_thread_var *mysys_var,
+ pthread_cond_t *cond,
+ pthread_mutex_t *mutex,
const char *calling_func,
const char *calling_file,
const unsigned int calling_line)
{
- safe_mutex_assert_owner(&LOCK_mdl);
+ safe_mutex_assert_owner(mutex);
- mysys_var->current_mutex= &LOCK_mdl;
- mysys_var->current_cond= &COND_mdl;
+ mysys_var->current_mutex= mutex;
+ mysys_var->current_cond= cond;
DEBUG_SYNC(thd, "mdl_enter_cond");
@@ -373,18 +624,20 @@ static inline const char *mdl_enter_cond(THD *thd,
calling_func, calling_file, calling_line);
}
-#define MDL_EXIT_COND(A, B, C) mdl_exit_cond(A, B, C, __func__, __FILE__, __LINE__)
+#define MDL_EXIT_COND(A, B, C, D) \
+ mdl_exit_cond(A, B, C, D, __func__, __FILE__, __LINE__)
static inline void mdl_exit_cond(THD *thd,
st_my_thread_var *mysys_var,
+ pthread_mutex_t *mutex,
const char* old_msg,
const char *calling_func,
const char *calling_file,
const unsigned int calling_line)
{
- DBUG_ASSERT(&LOCK_mdl == mysys_var->current_mutex);
+ DBUG_ASSERT(mutex == mysys_var->current_mutex);
- pthread_mutex_unlock(&LOCK_mdl);
+ pthread_mutex_unlock(mutex);
pthread_mutex_lock(&mysys_var->mutex);
mysys_var->current_mutex= 0;
mysys_var->current_cond= 0;
@@ -398,15 +651,14 @@ static inline void mdl_exit_cond(THD *thd,
/**
- Check if request for the lock on particular object can be satisfied given
- current state of the global metadata lock.
+ Check if a request for the global metadata lock can be satisfied given
+ its current state.
- @note In other words, we're trying to check that the individual lock
- request, implying a form of lock on the global metadata, is
- compatible with the current state of the global metadata lock.
-
- @param mdl_request Request for lock on an individual object, implying a
- certain kind of global metadata lock.
+ @param requestor_ctx The context that identifies the owner of the request.
+ @param type_arg The requested type of global lock. Usually derived
+ from the type of lock on individual object to be
+ requested. See table below.
+ @param is_upgrade TRUE if we are performing a lock upgrade (not used here).
@retval TRUE - Lock request can be satisfied
@retval FALSE - There is some conflicting lock
@@ -426,7 +678,7 @@ static inline void mdl_exit_cond(THD *thd,
Here: "+" -- means that request can be satisfied
"-" -- means that request can't be satisfied and should wait
- "0" -- means impossible situation which will trigger assert
+ "0" -- means impossible situation.
(*) Since for upgradable shared locks we always take intention exclusive
global lock at the same time when obtaining the shared lock, there
@@ -436,62 +688,98 @@ static inline void mdl_exit_cond(THD *thd,
*/
bool
-MDL_global_lock::is_lock_type_compatible(enum_mdl_type type,
- bool is_upgrade) const
+MDL_global_lock::can_grant_lock(const MDL_context *requestor_ctx,
+ enum_mdl_type type_arg,
+ bool is_upgrade)
{
- switch (type)
+ switch (type_arg)
{
case MDL_SHARED:
- case MDL_SHARED_HIGH_PRIO:
- return TRUE;
- break;
- case MDL_SHARED_UPGRADABLE:
- if (active_shared || waiting_shared)
+ if (! granted.is_empty() && granted.front()->m_type == MDL_INTENTION_EXCLUSIVE)
{
/*
- We are going to obtain intention exclusive global lock and
- there is active or pending shared global lock. Have to wait.
+ We are going to obtain global shared lock and there is active
+ intention exclusive lock. Have to wait.
*/
return FALSE;
}
- else
- return TRUE;
+ return TRUE;
break;
- case MDL_EXCLUSIVE:
- if (is_upgrade)
+ case MDL_INTENTION_EXCLUSIVE:
+ if ((! granted.is_empty() && granted.front()->m_type == MDL_SHARED) ||
+ ! waiting_shared.is_empty())
{
/*
- We are upgrading MDL_SHARED to MDL_EXCLUSIVE.
-
- There should be no conflicting global locks since for each upgradable
- shared lock we obtain intention exclusive global lock first.
+ We are going to obtain intention exclusive global lock and
+ there is active or pending shared global lock. Have to wait.
*/
- DBUG_ASSERT(active_shared == 0 && active_intention_exclusive);
- return TRUE;
+ return FALSE;
}
else
- {
- if (active_shared || waiting_shared)
- {
- /*
- We are going to obtain intention exclusive global lock and
- there is active or pending shared global lock.
- */
- return FALSE;
- }
- else
- return TRUE;
- }
+ return TRUE;
break;
default:
DBUG_ASSERT(0);
+ break;
}
return FALSE;
}
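
For reference, the compatibility rule implemented by MDL_global_lock::can_grant_lock() above can be restated as a small standalone function with a few self-checks: S is compatible with S, IX with IX, S and IX conflict, and a pending S also blocks new IX requests to avoid starving the global read lock. The enum and function names below are hypothetical; this is only a sketch of the rule, not server code.

/* Illustrative sketch only -- not part of the patch. */
#include <cassert>

enum global_lock_type { GL_SHARED, GL_INTENTION_EXCLUSIVE };

static bool global_can_grant(global_lock_type granted_type,
                             bool has_granted,
                             bool has_waiting_shared,
                             global_lock_type requested)
{
  if (requested == GL_SHARED)
    return ! (has_granted && granted_type == GL_INTENTION_EXCLUSIVE);
  /* requested == GL_INTENTION_EXCLUSIVE */
  return ! ((has_granted && granted_type == GL_SHARED) ||
            has_waiting_shared);
}

int main()
{
  /* S is compatible with S, IX with IX. */
  assert(global_can_grant(GL_SHARED, true, false, GL_SHARED));
  assert(global_can_grant(GL_INTENTION_EXCLUSIVE, true, false,
                          GL_INTENTION_EXCLUSIVE));
  /* S blocks IX and vice versa. */
  assert(! global_can_grant(GL_SHARED, true, false, GL_INTENTION_EXCLUSIVE));
  assert(! global_can_grant(GL_INTENTION_EXCLUSIVE, true, false, GL_SHARED));
  /* A pending S request also blocks new IX requests. */
  assert(! global_can_grant(GL_INTENTION_EXCLUSIVE, true, true,
                            GL_INTENTION_EXCLUSIVE));
  return 0;
}
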
/**
- Check if request for the lock can be satisfied given current state of lock.
+ Wake up contexts which are waiting to acquire the global
+ metadata lock and which may succeed now, when we released it, or
+ removed a blocking request for it from the waiters list.
+ The latter can happen when the context trying to acquire the
+ global shared lock is killed.
+*/
+
+void MDL_global_lock::wake_up_waiters()
+{
+ /*
+ If there are no active locks or they are of INTENTION
+ EXCLUSIVE type and there are no pending requests for global
+ SHARED lock, wake up contexts waiting for an INTENTION
+ EXCLUSIVE lock.
+ This happens when we release the global SHARED lock or abort
+ or remove a pending request for it, i.e. abort the
+ context waiting for it.
+ */
+ if ((granted.is_empty() ||
+ granted.front()->m_type == MDL_INTENTION_EXCLUSIVE) &&
+ waiting_shared.is_empty() && ! waiting_exclusive.is_empty())
+ {
+ MDL_lock::Ticket_iterator it(waiting_exclusive);
+ MDL_ticket *awake_ticket;
+ while ((awake_ticket= it++))
+ awake_ticket->get_ctx()->awake();
+ }
+
+ /*
+ If there are no active locks, wake up contexts waiting for
+ the global shared lock (happens when an INTENTION EXCLUSIVE
+ lock is released).
+
+ We don't wake up contexts waiting for the global shared lock
+ if there is an active global shared lock since such situation
+ is transient and in it contexts marked as waiting for global
+ shared lock must be already woken up and simply have not
+ managed to update lock object yet.
+ */
+ if (granted.is_empty() &&
+ ! waiting_shared.is_empty())
+ {
+ MDL_lock::Ticket_iterator it(waiting_shared);
+ MDL_ticket *awake_ticket;
+ while ((awake_ticket= it++))
+ awake_ticket->get_ctx()->awake();
+ }
+}
+
+
+/**
+ Check if request for the per-object lock can be satisfied given current
+ state of the lock.
@param requestor_ctx The context that identifies the owner of the request.
@param type_arg The requested lock type.
@@ -523,8 +811,9 @@ MDL_global_lock::is_lock_type_compatible(enum_mdl_type type,
*/
bool
-MDL_lock::can_grant_lock(const MDL_context *requestor_ctx, enum_mdl_type type_arg,
- bool is_upgrade)
+MDL_object_lock::can_grant_lock(const MDL_context *requestor_ctx,
+ enum_mdl_type type_arg,
+ bool is_upgrade)
{
bool can_grant= FALSE;
@@ -532,10 +821,10 @@ MDL_lock::can_grant_lock(const MDL_context *requestor_ctx, enum_mdl_type type_ar
case MDL_SHARED:
case MDL_SHARED_UPGRADABLE:
case MDL_SHARED_HIGH_PRIO:
- if (type == MDL_lock::MDL_LOCK_SHARED)
+ if (granted.is_empty() || granted.front()->is_shared())
{
/* Pending exclusive locks have higher priority over shared locks. */
- if (waiting.is_empty() || type_arg == MDL_SHARED_HIGH_PRIO)
+ if (waiting_exclusive.is_empty() || type_arg == MDL_SHARED_HIGH_PRIO)
can_grant= TRUE;
}
else if (granted.front()->get_ctx() == requestor_ctx)
@@ -559,7 +848,7 @@ MDL_lock::can_grant_lock(const MDL_context *requestor_ctx, enum_mdl_type type_ar
There should be no active exclusive locks since we own shared lock
on the object.
*/
- DBUG_ASSERT(type == MDL_lock::MDL_LOCK_SHARED);
+ DBUG_ASSERT(granted.front()->is_shared());
while ((conflicting_ticket= it++))
{
@@ -576,9 +865,13 @@ MDL_lock::can_grant_lock(const MDL_context *requestor_ctx, enum_mdl_type type_ar
can_grant= TRUE;
break;
}
- else if (type == MDL_lock::MDL_LOCK_SHARED)
+ else if (granted.is_empty())
{
- can_grant= granted.is_empty();
+ /*
+ We are trying to acquire fresh MDL_EXCLUSIVE and there are no active
+ shared or exclusive locks.
+ */
+ can_grant= TRUE;
}
break;
default:
@@ -589,6 +882,44 @@ MDL_lock::can_grant_lock(const MDL_context *requestor_ctx, enum_mdl_type type_ar
/**
+ Wake up contexts which are waiting to acquire a lock on an individual object
+ and which may succeed now, when we released some lock on it or removed
+ some pending request from its waiters list (the latter can happen, for
+ example, when context trying to acquire exclusive lock is killed).
+*/
+
+void MDL_object_lock::wake_up_waiters()
+{
+ /*
+ There are no active locks or they are of shared type.
+ We have to wake up contexts waiting for shared lock even if there is
+ a pending exclusive lock as some of them might be trying to acquire a high
+ priority shared lock.
+ */
+ if ((granted.is_empty() || granted.front()->is_shared()) &&
+ ! waiting_shared.is_empty())
+ {
+ MDL_lock::Ticket_iterator it(waiting_shared);
+ MDL_ticket *waiting_ticket;
+ while ((waiting_ticket= it++))
+ waiting_ticket->get_ctx()->awake();
+ }
+
+ /*
+ There are no active locks (shared or exclusive).
+ Wake up contexts waiting to acquire exclusive locks.
+ */
+ if (granted.is_empty() && ! waiting_exclusive.is_empty())
+ {
+ MDL_lock::Ticket_iterator it(waiting_exclusive);
+ MDL_ticket *waiting_ticket;
+ while ((waiting_ticket= it++))
+ waiting_ticket->get_ctx()->awake();
+ }
+}
+
+
+/**
Check whether the context already holds a compatible lock ticket
on an object.
Start searching the transactional locks. If not
@@ -626,47 +957,207 @@ MDL_context::find_ticket(MDL_request *mdl_request,
/**
- Try to acquire one shared lock.
+ Try to acquire global intention exclusive lock.
- Unlike exclusive locks, shared locks are acquired one by
- one. This is interface is chosen to simplify introduction of
- the new locking API to the system. MDL_context::try_acquire_shared_lock()
- is currently used from open_table(), and there we have only one
- table to work with.
+ @param[in/out] mdl_request Lock request object for lock to be acquired
- In future we may consider allocating multiple shared locks at once.
+ @retval FALSE Success. The lock may have not been acquired.
+ One needs to check value of 'MDL_request::ticket'
+ to find out what has happened.
+ @retval TRUE Error.
+*/
+
+bool
+MDL_context::
+try_acquire_global_intention_exclusive_lock(MDL_request *mdl_request)
+{
+ DBUG_ASSERT(mdl_request->key.mdl_namespace() == MDL_key::GLOBAL &&
+ mdl_request->type == MDL_INTENTION_EXCLUSIVE);
+
+ if (is_global_lock_owner(MDL_SHARED))
+ {
+ my_error(ER_CANT_UPDATE_WITH_READLOCK, MYF(0));
+ return TRUE;
+ }
+
+ return try_acquire_lock_impl(mdl_request);
+}
+
+
+/**
+ Acquire one lock with waiting for conflicting locks to go away if needed.
+
+ @note This is an internal method which should not be used outside of MDL
+ subsystem as in most cases simply waiting for conflicting locks to
+ go away will lead to deadlock.
+
+ @param mdl_request [in/out] Lock request object for lock to be acquired
+
+ @retval FALSE Success. MDL_request::ticket points to the ticket
+ for the lock.
+ @retval TRUE Failure (Out of resources or waiting is aborted),
+*/
+
+bool
+MDL_context::acquire_lock_impl(MDL_request *mdl_request)
+{
+ bool not_used;
+ MDL_ticket *ticket;
+ MDL_key *key= &mdl_request->key;
+ MDL_lock *lock;
+ const char *old_msg;
+ st_my_thread_var *mysys_var= my_thread_var;
+
+ DBUG_ASSERT(mdl_request->ticket == NULL);
+ safe_mutex_assert_not_owner(&LOCK_open);
+
+ /*
+ Grant lock without waiting if this context already owns this type of lock
+ on this object.
+
+ The fact that we don't wait in such a situation allows us to avoid
+ deadlocks in cases when a pending request for the global shared lock pops
+ up after the moment when a thread has acquired its first intention
+ exclusive lock but before it has requested the second instance of such a lock.
+ */
+ if ((mdl_request->ticket= find_ticket(mdl_request, &not_used)))
+ return FALSE;
+
+ if (! (ticket= MDL_ticket::create(this, mdl_request->type)))
+ return TRUE;
+
+ /* The below call also implicitly locks MDL_lock::m_mutex. */
+ if (! (lock= mdl_locks.find_or_insert(key)))
+ {
+ MDL_ticket::destroy(ticket);
+ return TRUE;
+ }
+
+ old_msg= MDL_ENTER_COND(m_thd, mysys_var, &m_ctx_wakeup_cond,
+ &lock->m_mutex);
+
+ if (! lock->can_grant_lock(this, mdl_request->type, FALSE))
+ {
+ if (mdl_request->is_shared())
+ lock->waiting_shared.push_front(ticket);
+ else
+ lock->waiting_exclusive.push_front(ticket);
+
+ do
+ {
+ pthread_cond_wait(&m_ctx_wakeup_cond, &lock->m_mutex);
+ }
+ while (! lock->can_grant_lock(this, mdl_request->type, FALSE) &&
+ ! mysys_var->abort);
+
+ if (mysys_var->abort)
+ {
+ /*
+ We have to do MDL_EXIT_COND here and then re-acquire the lock
+ as there is a chance that we will destroy MDL_lock object and
+ won't be able to call MDL_EXIT_COND after it.
+ */
+ MDL_EXIT_COND(m_thd, mysys_var, &lock->m_mutex, old_msg);
+
+ pthread_mutex_lock(&lock->m_mutex);
+ /* Get rid of pending ticket. */
+ if (mdl_request->is_shared())
+ lock->waiting_shared.remove(ticket);
+ else
+ lock->waiting_exclusive.remove(ticket);
+ if (lock->is_empty())
+ mdl_locks.remove(lock);
+ else
+ {
+ lock->wake_up_waiters();
+ pthread_mutex_unlock(&lock->m_mutex);
+ }
+ MDL_ticket::destroy(ticket);
+ return TRUE;
+ }
+
+ if (mdl_request->is_shared())
+ lock->waiting_shared.remove(ticket);
+ else
+ lock->waiting_exclusive.remove(ticket);
+ }
+
+ lock->granted.push_front(ticket);
+ MDL_EXIT_COND(m_thd, mysys_var, &lock->m_mutex, old_msg);
+
+ ticket->m_state= MDL_ACQUIRED;
+ ticket->m_lock= lock;
+
+ m_tickets.push_front(ticket);
+
+ mdl_request->ticket= ticket;
+
+ return FALSE;
+}
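
The core of acquire_lock_impl() above is a classic monitor-style wait: park a ticket on the lock's waiting list, then sleep on the context's own condition variable until can_grant_lock() succeeds (or the wait is aborted). A stripped-down, self-contained model of that pattern follows; Waiter, LockState and can_grant are hypothetical stand-ins, and abort handling plus the MDL_map bookkeeping are intentionally left out.

/* Illustrative sketch only -- not part of the patch. */
#include <pthread.h>
#include <list>

struct Waiter
{
  pthread_cond_t cond;               /* per-context wakeup condition */
  Waiter() { pthread_cond_init(&cond, NULL); }
  ~Waiter() { pthread_cond_destroy(&cond); }
};

struct LockState
{
  pthread_mutex_t mutex;
  std::list<Waiter*> granted;
  std::list<Waiter*> waiting;
  LockState() { pthread_mutex_init(&mutex, NULL); }

  /* Stand-in for can_grant_lock(): toy exclusive-only rule. */
  bool can_grant(Waiter *) const { return granted.empty(); }

  void acquire(Waiter *w)
  {
    pthread_mutex_lock(&mutex);
    if (! can_grant(w))
    {
      waiting.push_front(w);
      do
        pthread_cond_wait(&w->cond, &mutex); /* releases/re-takes mutex */
      while (! can_grant(w));
      waiting.remove(w);
    }
    granted.push_front(w);
    pthread_mutex_unlock(&mutex);
  }

  void release(Waiter *w)
  {
    pthread_mutex_lock(&mutex);
    granted.remove(w);
    /* wake_up_waiters(): every waiter re-checks its own predicate. */
    for (std::list<Waiter*>::iterator it= waiting.begin();
         it != waiting.end(); ++it)
      pthread_cond_signal(&(*it)->cond);
    pthread_mutex_unlock(&mutex);
  }
};
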
+
+
+/**
+ Acquire global intention exclusive lock.
+
+ @param[in] mdl_request Lock request object for lock to be acquired
+
+ @retval FALSE Success. The lock has been acquired.
+ @retval TRUE Error.
+*/
+
+bool
+MDL_context::acquire_global_intention_exclusive_lock(MDL_request *mdl_request)
+{
+ DBUG_ASSERT(mdl_request->key.mdl_namespace() == MDL_key::GLOBAL &&
+ mdl_request->type == MDL_INTENTION_EXCLUSIVE);
+
+ if (is_global_lock_owner(MDL_SHARED))
+ {
+ my_error(ER_CANT_UPDATE_WITH_READLOCK, MYF(0));
+ return TRUE;
+ }
+
+ /*
+ If this is a non-recursive attempt to acquire the global intention
+ exclusive lock, we might have to wait until the active global shared
+ lock or pending requests for it go away. Since we won't hold any
+ resources (except those associated with open HANDLERs) while doing so,
+ deadlocks are not possible.
+ */
+ DBUG_ASSERT(is_global_lock_owner(MDL_INTENTION_EXCLUSIVE) ||
+ ! has_locks() ||
+ (m_lt_or_ha_sentinel &&
+ m_tickets.front() == m_lt_or_ha_sentinel));
+
+ return acquire_lock_impl(mdl_request);
+}
+
+
+/**
+ Try to acquire one lock.
@param mdl_request [in/out] Lock request object for lock to be acquired
@retval FALSE Success. The lock may have not been acquired.
Check the ticket, if it's NULL, a conflicting lock
- exists and another attempt should be made after releasing
- all current locks and waiting for conflicting lock go
- away (using MDL_context::wait_for_locks()).
+ exists.
@retval TRUE Out of resources, an error has been reported.
*/
bool
-MDL_context::try_acquire_shared_lock(MDL_request *mdl_request)
+MDL_context::try_acquire_lock_impl(MDL_request *mdl_request)
{
MDL_lock *lock;
MDL_key *key= &mdl_request->key;
MDL_ticket *ticket;
bool is_lt_or_ha;
- DBUG_ASSERT(mdl_request->is_shared() && mdl_request->ticket == NULL);
+ DBUG_ASSERT(mdl_request->ticket == NULL);
/* Don't take chances in production. */
mdl_request->ticket= NULL;
safe_mutex_assert_not_owner(&LOCK_open);
- if (m_has_global_shared_lock &&
- mdl_request->type == MDL_SHARED_UPGRADABLE)
- {
- my_error(ER_CANT_UPDATE_WITH_READLOCK, MYF(0));
- return TRUE;
- }
-
/*
Check whether the context already holds a shared lock on the object,
and if so, grant the request.
@@ -674,8 +1165,7 @@ MDL_context::try_acquire_shared_lock(MDL_request *mdl_request)
if ((ticket= find_ticket(mdl_request, &is_lt_or_ha)))
{
DBUG_ASSERT(ticket->m_state == MDL_ACQUIRED);
- /* Only shared locks can be recursive. */
- DBUG_ASSERT(ticket->is_shared());
+ DBUG_ASSERT(ticket->m_type == mdl_request->type);
/*
If the request is for a transactional lock, and we found
a transactional lock, just reuse the found ticket.
@@ -703,57 +1193,73 @@ MDL_context::try_acquire_shared_lock(MDL_request *mdl_request)
return FALSE;
}
- pthread_mutex_lock(&LOCK_mdl);
-
- if (!global_lock.is_lock_type_compatible(mdl_request->type, FALSE))
- {
- pthread_mutex_unlock(&LOCK_mdl);
- return FALSE;
- }
-
if (!(ticket= MDL_ticket::create(this, mdl_request->type)))
- {
- pthread_mutex_unlock(&LOCK_mdl);
return TRUE;
- }
- if (!(lock= (MDL_lock*) my_hash_search(&mdl_locks,
- key->ptr(), key->length())))
+ /* The below call also implicitly locks MDL_lock::m_mutex. */
+ if (!(lock= mdl_locks.find_or_insert(key)))
{
- /* Default lock type is MDL_lock::MDL_LOCK_SHARED */
- lock= MDL_lock::create(key);
- if (!lock || my_hash_insert(&mdl_locks, (uchar*)lock))
- {
- MDL_lock::destroy(lock);
- MDL_ticket::destroy(ticket);
- pthread_mutex_unlock(&LOCK_mdl);
- return TRUE;
- }
+ MDL_ticket::destroy(ticket);
+ return TRUE;
}
if (lock->can_grant_lock(this, mdl_request->type, FALSE))
{
- mdl_request->ticket= ticket;
lock->granted.push_front(ticket);
- m_tickets.push_front(ticket);
+ pthread_mutex_unlock(&lock->m_mutex);
+
ticket->m_state= MDL_ACQUIRED;
ticket->m_lock= lock;
- if (mdl_request->type == MDL_SHARED_UPGRADABLE)
- global_lock.active_intention_exclusive++;
+
+ m_tickets.push_front(ticket);
+
+ mdl_request->ticket= ticket;
}
else
{
/* We can't get here if we allocated a new lock. */
DBUG_ASSERT(! lock->is_empty());
+ pthread_mutex_unlock(&lock->m_mutex);
MDL_ticket::destroy(ticket);
}
- pthread_mutex_unlock(&LOCK_mdl);
return FALSE;
}
/**
+ Try to acquire one shared lock.
+
+ Unlike exclusive locks, shared locks are acquired one by
+ one. This interface is chosen to simplify the introduction of
+ the new locking API to the system. MDL_context::try_acquire_shared_lock()
+ is currently used from open_table(), and there we have only one
+ table to work with.
+
+ In future we may consider allocating multiple shared locks at once.
+
+ @param mdl_request [in/out] Lock request object for lock to be acquired
+
+ @retval FALSE Success. The lock may have not been acquired.
+ Check the ticket, if it's NULL, a conflicting lock
+ exists and another attempt should be made after releasing
+ all current locks and waiting for conflicting lock go
+ away (using MDL_context::wait_for_locks()).
+ @retval TRUE Out of resources, an error has been reported.
+*/
+
+bool
+MDL_context::try_acquire_shared_lock(MDL_request *mdl_request)
+{
+ DBUG_ASSERT(mdl_request->is_shared());
+ DBUG_ASSERT(mdl_request->type != MDL_SHARED_UPGRADABLE ||
+ is_global_lock_owner(MDL_INTENTION_EXCLUSIVE));
+
+ return try_acquire_lock_impl(mdl_request);
+}
+
+
+/**
Create a copy of a granted ticket.
This is used to make sure that HANDLER ticket
is never shared with a ticket that belongs to
@@ -782,11 +1288,9 @@ MDL_context::clone_ticket(MDL_request *mdl_request)
ticket->m_lock= mdl_request->ticket->m_lock;
mdl_request->ticket= ticket;
- pthread_mutex_lock(&LOCK_mdl);
+ pthread_mutex_lock(&ticket->m_lock->m_mutex);
ticket->m_lock->granted.push_front(ticket);
- if (mdl_request->type == MDL_SHARED_UPGRADABLE)
- global_lock.active_intention_exclusive++;
- pthread_mutex_unlock(&LOCK_mdl);
+ pthread_mutex_unlock(&ticket->m_lock->m_mutex);
m_tickets.push_front(ticket);
@@ -799,225 +1303,291 @@ MDL_context::clone_ticket(MDL_request *mdl_request)
@param thd Current thread context
@param conflicting_ticket Conflicting metadata lock
-
- @retval TRUE A thread was woken up
- @retval FALSE Lock is not a shared one or no thread was woken up
*/
-bool notify_shared_lock(THD *thd, MDL_ticket *conflicting_ticket)
+void notify_shared_lock(THD *thd, MDL_ticket *conflicting_ticket)
{
- bool woke= FALSE;
if (conflicting_ticket->is_shared())
{
THD *conflicting_thd= conflicting_ticket->get_ctx()->get_thd();
DBUG_ASSERT(thd != conflicting_thd); /* Self-deadlock */
/*
- If the thread that holds the conflicting lock is waiting
- on an MDL lock, wake it up by broadcasting on COND_mdl.
- Otherwise it must be waiting on a table-level lock
- or some other non-MDL resource, so delegate its waking up
- to an external call.
+ If the thread that holds the conflicting lock is waiting in MDL
+ subsystem it has to be woken up by calling MDL_context::awake().
*/
- if (conflicting_ticket->get_ctx()->is_waiting_in_mdl())
- {
- pthread_cond_broadcast(&COND_mdl);
- woke= TRUE;
- }
- else
- woke= mysql_notify_thread_having_shared_lock(thd, conflicting_thd);
+ conflicting_ticket->get_ctx()->awake();
+ /*
+ If it is waiting on table-level lock or some other non-MDL resource
+ we delegate its waking up to code outside of MDL.
+ */
+ mysql_notify_thread_having_shared_lock(thd, conflicting_thd);
}
- return woke;
}
/**
- Acquire a single exclusive lock. A convenience
- wrapper around the method acquiring a list of locks.
-*/
+ Auxiliary method for acquiring an exclusive lock.
-bool MDL_context::acquire_exclusive_lock(MDL_request *mdl_request)
-{
- MDL_request_list mdl_requests;
- mdl_requests.push_front(mdl_request);
- return acquire_exclusive_locks(&mdl_requests);
-}
+ @param mdl_request Request for the lock to be acquired.
-
-/**
- Acquire exclusive locks. The context must contain the list of
- locks to be acquired. There must be no granted locks in the
- context.
-
- This is a replacement of lock_table_names(). It is used in
- RENAME, DROP and other DDL SQL statements.
-
- @note The MDL context may not have non-exclusive lock requests
- or acquired locks.
+ @note Should not be used outside of MDL subsystem. Instead one should
+ call acquire_exclusive_lock() or acquire_exclusive_locks() methods
+ which ensure that conditions for deadlock-free lock acquisition are
+ fulfilled.
@retval FALSE Success
@retval TRUE Failure
*/
-bool MDL_context::acquire_exclusive_locks(MDL_request_list *mdl_requests)
+bool MDL_context::acquire_exclusive_lock_impl(MDL_request *mdl_request)
{
MDL_lock *lock;
- bool signalled= FALSE;
const char *old_msg;
- MDL_request *mdl_request;
MDL_ticket *ticket;
+ bool not_used;
st_my_thread_var *mysys_var= my_thread_var;
- MDL_request_list::Iterator it(*mdl_requests);
+ MDL_key *key= &mdl_request->key;
+
+ DBUG_ASSERT(mdl_request->type == MDL_EXCLUSIVE &&
+ mdl_request->ticket == NULL);
safe_mutex_assert_not_owner(&LOCK_open);
- /* Exclusive locks must always be acquired first, all at once. */
- DBUG_ASSERT(! has_locks() ||
- (m_lt_or_ha_sentinel &&
- m_tickets.front() == m_lt_or_ha_sentinel));
- if (m_has_global_shared_lock)
+ /* Don't take chances in production. */
+ mdl_request->ticket= NULL;
+
+ /*
+ Check whether the context already holds an exclusive lock on the object,
+ and if so, grant the request.
+ */
+ if ((ticket= find_ticket(mdl_request, &not_used)))
{
- my_error(ER_CANT_UPDATE_WITH_READLOCK, MYF(0));
- return TRUE;
+ DBUG_ASSERT(ticket->m_state == MDL_ACQUIRED);
+ DBUG_ASSERT(ticket->m_type == MDL_EXCLUSIVE);
+ mdl_request->ticket= ticket;
+ return FALSE;
}
- pthread_mutex_lock(&LOCK_mdl);
+ DBUG_ASSERT(is_global_lock_owner(MDL_INTENTION_EXCLUSIVE));
- old_msg= MDL_ENTER_COND(m_thd, mysys_var);
+ /* Early allocation: ticket will be needed in any case. */
+ if (!(ticket= MDL_ticket::create(this, mdl_request->type)))
+ return TRUE;
- while ((mdl_request= it++))
+ /* The below call also implicitly locks MDL_lock::m_mutex. */
+ if (!(lock= mdl_locks.find_or_insert(key)))
{
- MDL_key *key= &mdl_request->key;
- DBUG_ASSERT(mdl_request->type == MDL_EXCLUSIVE &&
- mdl_request->ticket == NULL);
-
- /* Don't take chances in production. */
- mdl_request->ticket= NULL;
-
- /* Early allocation: ticket is used as a shortcut to the lock. */
- if (!(ticket= MDL_ticket::create(this, mdl_request->type)))
- goto err;
-
- if (!(lock= (MDL_lock*) my_hash_search(&mdl_locks,
- key->ptr(), key->length())))
- {
- lock= MDL_lock::create(key);
- if (!lock || my_hash_insert(&mdl_locks, (uchar*)lock))
- {
- MDL_ticket::destroy(ticket);
- MDL_lock::destroy(lock);
- goto err;
- }
- }
-
- mdl_request->ticket= ticket;
- lock->waiting.push_front(ticket);
- ticket->m_lock= lock;
+ MDL_ticket::destroy(ticket);
+ return TRUE;
}
- while (1)
- {
- it.rewind();
- while ((mdl_request= it++))
- {
- lock= mdl_request->ticket->m_lock;
-
- if (!global_lock.is_lock_type_compatible(mdl_request->type, FALSE))
- {
- /*
- Someone owns or wants to acquire the global shared lock so
- we have to wait until he goes away.
- */
- signalled= TRUE;
- break;
- }
- else if (!lock->can_grant_lock(this, mdl_request->type, FALSE))
- {
- MDL_ticket *conflicting_ticket;
- MDL_lock::Ticket_iterator it(lock->granted);
+ lock->waiting_exclusive.push_front(ticket);
- signalled= (lock->type == MDL_lock::MDL_LOCK_EXCLUSIVE);
-
- while ((conflicting_ticket= it++))
- signalled|= notify_shared_lock(m_thd, conflicting_ticket);
-
- break;
- }
- }
- if (!mdl_request)
- break;
+ old_msg= MDL_ENTER_COND(m_thd, mysys_var, &m_ctx_wakeup_cond,
+ &lock->m_mutex);
+ while (!lock->can_grant_lock(this, mdl_request->type, FALSE))
+ {
if (m_lt_or_ha_sentinel)
{
/*
We're about to start waiting. Don't do it if we have
HANDLER locks (we can't have any other locks here).
Waiting with locks may lead to a deadlock.
+
+ We have to do MDL_EXIT_COND here and then re-acquire the
+ lock as there is a chance that we will destroy MDL_lock
+ object and won't be able to call MDL_EXIT_COND after it.
*/
+ MDL_EXIT_COND(m_thd, mysys_var, &lock->m_mutex, old_msg);
+
+ pthread_mutex_lock(&lock->m_mutex);
+ /* Get rid of pending ticket. */
+ lock->waiting_exclusive.remove(ticket);
+ if (lock->is_empty())
+ mdl_locks.remove(lock);
+ else
+ {
+ /*
+ There can be some contexts waiting to acquire shared
+ lock which now might be able to do it. Wake them up!
+ */
+ lock->wake_up_waiters();
+ pthread_mutex_unlock(&lock->m_mutex);
+ }
+ MDL_ticket::destroy(ticket);
my_error(ER_LOCK_DEADLOCK, MYF(0));
- goto err;
+ return TRUE;
}
+ MDL_ticket *conflicting_ticket;
+ MDL_lock::Ticket_iterator it(lock->granted);
+
+ while ((conflicting_ticket= it++))
+ notify_shared_lock(m_thd, conflicting_ticket);
+
/* There is a shared or exclusive lock on the object. */
DEBUG_SYNC(m_thd, "mdl_acquire_exclusive_locks_wait");
- if (signalled)
- pthread_cond_wait(&COND_mdl, &LOCK_mdl);
- else
+ /*
+ Another thread might have obtained a shared MDL lock on some table
+ but has not yet opened it and/or tried to obtain data lock on it.
+ Also invocation of acquire_exclusive_lock() method and consequently
+ first call to notify_shared_lock() might have happened right after
+ thread holding shared metadata lock in wait_for_locks() method
+ checked that there are no pending conflicting locks but before
+ it has started waiting.
+ In both these cases we need to sleep until these threads start
+ waiting and try to abort them once again.
+
+ QQ: What is the optimal value for this sleep?
+ */
+ struct timespec abstime;
+ set_timespec(abstime, 1);
+ pthread_cond_timedwait(&m_ctx_wakeup_cond, &lock->m_mutex, &abstime);
+
+ if (mysys_var->abort)
{
/*
- Another thread obtained a shared MDL lock on some table but
- has not yet opened it and/or tried to obtain data lock on
- it. In this case we need to wait until this happens and try
- to abort this thread once again.
+ We have to do MDL_EXIT_COND here and then re-acquire the lock
+ as there is a chance that we will destroy MDL_lock object and
+ won't be able to call MDL_EXIT_COND after it.
*/
- struct timespec abstime;
- set_timespec(abstime, 1);
- pthread_cond_timedwait(&COND_mdl, &LOCK_mdl, &abstime);
+ MDL_EXIT_COND(m_thd, mysys_var, &lock->m_mutex, old_msg);
+
+ pthread_mutex_lock(&lock->m_mutex);
+ /* Get rid of pending ticket. */
+ lock->waiting_exclusive.remove(ticket);
+ if (lock->is_empty())
+ mdl_locks.remove(lock);
+ else
+ {
+ /*
+ There can be some contexts waiting to acquire shared
+ lock which now might be able to do it. Wake them up!
+ */
+ lock->wake_up_waiters();
+ pthread_mutex_unlock(&lock->m_mutex);
+ }
+ MDL_ticket::destroy(ticket);
+ return TRUE;
}
- if (mysys_var->abort)
- goto err;
}
- it.rewind();
- while ((mdl_request= it++))
+
+ lock->waiting_exclusive.remove(ticket);
+ lock->granted.push_front(ticket);
+
+ if (lock->cached_object)
+ (*lock->cached_object_release_hook)(lock->cached_object);
+ lock->cached_object= NULL;
+
+ MDL_EXIT_COND(m_thd, mysys_var, &lock->m_mutex, old_msg);
+
+ ticket->m_state= MDL_ACQUIRED;
+ ticket->m_lock= lock;
+
+ m_tickets.push_front(ticket);
+
+ mdl_request->ticket= ticket;
+
+ return FALSE;
+}
+
+
+/**
+ Acquire an exclusive lock.
+
+ @param mdl_request Request for the lock to be acquired.
+
+ @note Assumes that one already owns global intention exclusive lock.
+
+ @retval FALSE Success
+ @retval TRUE Failure
+*/
+
+bool MDL_context::acquire_exclusive_lock(MDL_request *mdl_request)
+{
+ /* Exclusive locks must always be acquired first, all at once. */
+ DBUG_ASSERT(! m_tickets.is_empty() &&
+ m_tickets.front()->m_lock->key.mdl_namespace() == MDL_key::GLOBAL &&
+ ++Ticket_list::Iterator(m_tickets) == m_lt_or_ha_sentinel);
+
+ return acquire_exclusive_lock_impl(mdl_request);
+}
+
+
+extern "C" int mdl_request_ptr_cmp(const void* ptr1, const void* ptr2)
+{
+ MDL_request *req1= *(MDL_request**)ptr1;
+ MDL_request *req2= *(MDL_request**)ptr2;
+ return req1->key.cmp(&req2->key);
+}
+
+
+/**
+ Acquire exclusive locks. There must be no granted locks in the
+ context.
+
+ This is a replacement of lock_table_names(). It is used in
+ RENAME, DROP and other DDL SQL statements.
+
+ @param mdl_requests List of requests for locks to be acquired.
+
+ @note The list of requests should not contain non-exclusive lock requests.
+ There should not be any acquired locks in the context.
+
+ @note Assumes that one already owns global intention exclusive lock.
+
+ @retval FALSE Success
+ @retval TRUE Failure
+*/
+
+bool MDL_context::acquire_exclusive_locks(MDL_request_list *mdl_requests)
+{
+ MDL_request_list::Iterator it(*mdl_requests);
+ MDL_request **sort_buf;
+ uint i;
+
+ /*
+ Exclusive locks must always be acquired first, all at once.
+ */
+ DBUG_ASSERT(! m_tickets.is_empty() &&
+ m_tickets.front()->m_lock->key.mdl_namespace() == MDL_key::GLOBAL &&
+ ++Ticket_list::Iterator(m_tickets) == m_lt_or_ha_sentinel);
+
+ if (mdl_requests->is_empty())
+ return FALSE;
+
+ /* Sort requests according to MDL_key. */
+ if (! (sort_buf= (MDL_request **)my_malloc(mdl_requests->elements() *
+ sizeof(MDL_request *),
+ MYF(MY_WME))))
+ return TRUE;
+
+ for (i= 0; i < mdl_requests->elements(); i++)
+ sort_buf[i]= it++;
+
+ my_qsort(sort_buf, mdl_requests->elements(), sizeof(MDL_request*),
+ mdl_request_ptr_cmp);
+
+ for (i= 0; i < mdl_requests->elements(); i++)
{
- global_lock.active_intention_exclusive++;
- ticket= mdl_request->ticket;
- lock= ticket->m_lock;
- lock->type= MDL_lock::MDL_LOCK_EXCLUSIVE;
- lock->waiting.remove(ticket);
- lock->granted.push_front(ticket);
- m_tickets.push_front(ticket);
- ticket->m_state= MDL_ACQUIRED;
- if (lock->cached_object)
- (*lock->cached_object_release_hook)(lock->cached_object);
- lock->cached_object= NULL;
+ if (acquire_exclusive_lock_impl(sort_buf[i]))
+ goto err;
}
- /* As a side-effect MDL_EXIT_COND() unlocks LOCK_mdl. */
- MDL_EXIT_COND(m_thd, mysys_var, old_msg);
+ my_free(sort_buf, MYF(0));
return FALSE;
err:
- /* Remove our pending tickets from the locks. */
- it.rewind();
- while ((mdl_request= it++) && mdl_request->ticket)
+ /* Release locks we have managed to acquire so far. */
+ for (i= 0; i < mdl_requests->elements() && sort_buf[i]->ticket; i++)
{
- ticket= mdl_request->ticket;
- DBUG_ASSERT(ticket->m_state == MDL_PENDING);
- lock= ticket->m_lock;
- lock->waiting.remove(ticket);
- MDL_ticket::destroy(ticket);
+ release_lock(sort_buf[i]->ticket);
/* Reset lock request back to its initial state. */
- mdl_request->ticket= NULL;
- if (lock->is_empty())
- {
- my_hash_delete(&mdl_locks, (uchar *)lock);
- MDL_lock::destroy(lock);
- }
+ sort_buf[i]->ticket= NULL;
}
- /* May be some pending requests for shared locks can be satisfied now. */
- pthread_cond_broadcast(&COND_mdl);
- MDL_EXIT_COND(m_thd, mysys_var, old_msg);
+ my_free(sort_buf, MYF(0));
return TRUE;
}
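
acquire_exclusive_locks() now sorts the requests by MDL_key (mdl_request_ptr_cmp + my_qsort) and acquires them one by one. The point of the sort is deadlock avoidance: if every connection takes multiple exclusive locks in the same global order, no cycle of "each holds one lock and waits for the other's" can form. A hypothetical, standalone illustration of the same ordering discipline using plain pthread mutexes (Resource, lock_all and unlock_all are made-up names):

/* Illustrative sketch only -- not part of the patch. */
#include <pthread.h>
#include <algorithm>
#include <string>
#include <vector>

struct Resource
{
  std::string name;
  pthread_mutex_t mutex;
  Resource(const std::string &n) : name(n)
  { pthread_mutex_init(&mutex, NULL); }
};

static bool resource_name_lt(const Resource *a, const Resource *b)
{
  return a->name < b->name;
}

/* Lock all resources in name order to rule out lock-order deadlocks. */
static void lock_all(std::vector<Resource*> &resources)
{
  std::sort(resources.begin(), resources.end(), resource_name_lt);
  for (size_t i= 0; i < resources.size(); i++)
    pthread_mutex_lock(&resources[i]->mutex);
}

static void unlock_all(std::vector<Resource*> &resources)
{
  for (size_t i= resources.size(); i > 0; i--)
    pthread_mutex_unlock(&resources[i - 1]->mutex);
}
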
@@ -1062,6 +1632,12 @@ MDL_ticket::upgrade_shared_lock_to_exclusive()
DBUG_ASSERT(m_type == MDL_SHARED_UPGRADABLE);
/*
+ Since we should have already acquired an intention exclusive
+ global lock this call is only enforcing asserts.
+ */
+ DBUG_ASSERT(m_ctx->is_global_lock_owner(MDL_INTENTION_EXCLUSIVE));
+
+ /*
Create an auxiliary ticket to represent a pending exclusive
lock and add it to the 'waiting' queue for the duration
of upgrade. During upgrade we abort waits of connections
@@ -1072,24 +1648,21 @@ MDL_ticket::upgrade_shared_lock_to_exclusive()
if (! (pending_ticket= MDL_ticket::create(m_ctx, MDL_EXCLUSIVE)))
DBUG_RETURN(TRUE);
- pthread_mutex_lock(&LOCK_mdl);
+ pthread_mutex_lock(&m_lock->m_mutex);
- pending_ticket->m_lock= m_lock;
- m_lock->waiting.push_front(pending_ticket);
+ m_lock->waiting_exclusive.push_front(pending_ticket);
- old_msg= MDL_ENTER_COND(thd, mysys_var);
-
- /*
- Since we should have already acquired an intention exclusive
- global lock this call is only enforcing asserts.
- */
- DBUG_ASSERT(global_lock.is_lock_type_compatible(MDL_EXCLUSIVE, TRUE));
+ old_msg= MDL_ENTER_COND(thd, mysys_var, &m_ctx->m_ctx_wakeup_cond,
+ &m_lock->m_mutex);
while (1)
{
if (m_lock->can_grant_lock(m_ctx, MDL_EXCLUSIVE, TRUE))
break;
+ MDL_ticket *conflicting_ticket;
+ MDL_lock::Ticket_iterator it(m_lock->granted);
+
/*
If m_ctx->lt_or_ha_sentinel(), and this sentinel is for HANDLER,
we can deadlock. However, HANDLER is not allowed under
@@ -1113,12 +1686,7 @@ MDL_ticket::upgrade_shared_lock_to_exclusive()
(*) There is no requirement to upgrade lock in
CREATE/DROP TRIGGER, it's used there just for convenience.
- */
- bool signalled= FALSE;
- MDL_ticket *conflicting_ticket;
- MDL_lock::Ticket_iterator it(m_lock->granted);
- /*
A temporary work-around to avoid deadlocks/livelocks in
a situation when in one connection ALTER TABLE tries to
upgrade its metadata lock and in another connection
@@ -1145,53 +1713,57 @@ MDL_ticket::upgrade_shared_lock_to_exclusive()
while ((conflicting_ticket= it++))
{
if (conflicting_ticket->m_ctx != m_ctx)
- signalled|= notify_shared_lock(thd, conflicting_ticket);
+ notify_shared_lock(thd, conflicting_ticket);
}
/* There is a shared or exclusive lock on the object. */
DEBUG_SYNC(thd, "mdl_upgrade_shared_lock_to_exclusive_wait");
- if (signalled)
- pthread_cond_wait(&COND_mdl, &LOCK_mdl);
- else
+ /*
+ Another thread might have obtained a shared MDL lock on some table
+ but has not yet opened it and/or tried to obtain data lock on it.
+ Also invocation of acquire_exclusive_lock() method and consequently
+ first call to notify_shared_lock() might have happened right after
+ thread holding shared metadata lock in wait_for_locks() method
+ checked that there are no pending conflicting locks but before
+ it has started waiting.
+ In both these cases we need to sleep until these threads start
+ waiting and try to abort them once again.
+ */
+ struct timespec abstime;
+ set_timespec(abstime, 1);
+ pthread_cond_timedwait(&m_ctx->m_ctx_wakeup_cond, &m_lock->m_mutex,
+ &abstime);
+
+ if (mysys_var->abort)
{
+ m_lock->waiting_exclusive.remove(pending_ticket);
/*
- Another thread obtained a shared MDL lock on some table but
- has not yet opened it and/or tried to obtain data lock on
- it. In this case we need to wait until this happens and try
- to abort this thread once again.
+ If there are no other pending requests for exclusive locks
+ we need to wake up threads waiting for a chance to acquire
+ shared lock.
*/
- struct timespec abstime;
- set_timespec(abstime, 1);
- DBUG_PRINT("info", ("Failed to wake-up from table-level lock ... sleeping"));
- pthread_cond_timedwait(&COND_mdl, &LOCK_mdl, &abstime);
- }
- if (mysys_var->abort)
- {
- /* Remove and destroy the auxiliary pending ticket. */
- m_lock->waiting.remove(pending_ticket);
+ m_lock->wake_up_waiters();
+ MDL_EXIT_COND(thd, mysys_var, &m_lock->m_mutex, old_msg);
MDL_ticket::destroy(pending_ticket);
- /* Pending requests for shared locks can be satisfied now. */
- pthread_cond_broadcast(&COND_mdl);
- MDL_EXIT_COND(thd, mysys_var, old_msg);
DBUG_RETURN(TRUE);
}
}
- m_lock->type= MDL_lock::MDL_LOCK_EXCLUSIVE;
/* Set the new type of lock in the ticket. */
m_type= MDL_EXCLUSIVE;
/* Remove and destroy the auxiliary pending ticket. */
- m_lock->waiting.remove(pending_ticket);
- MDL_ticket::destroy(pending_ticket);
+ m_lock->waiting_exclusive.remove(pending_ticket);
if (m_lock->cached_object)
(*m_lock->cached_object_release_hook)(m_lock->cached_object);
m_lock->cached_object= 0;
- /* As a side-effect MDL_EXIT_COND() unlocks LOCK_mdl. */
- MDL_EXIT_COND(thd, mysys_var, old_msg);
+ MDL_EXIT_COND(thd, mysys_var, &m_lock->m_mutex, old_msg);
+
+ MDL_ticket::destroy(pending_ticket);
+
DBUG_RETURN(FALSE);
}
@@ -1222,41 +1794,10 @@ MDL_ticket::upgrade_shared_lock_to_exclusive()
bool
MDL_context::try_acquire_exclusive_lock(MDL_request *mdl_request)
{
- MDL_lock *lock;
- MDL_ticket *ticket;
- MDL_key *key= &mdl_request->key;
+ DBUG_ASSERT(mdl_request->type == MDL_EXCLUSIVE);
+ DBUG_ASSERT(is_global_lock_owner(MDL_INTENTION_EXCLUSIVE));
- DBUG_ASSERT(mdl_request->type == MDL_EXCLUSIVE &&
- mdl_request->ticket == NULL);
-
- safe_mutex_assert_not_owner(&LOCK_open);
-
- mdl_request->ticket= NULL;
-
- pthread_mutex_lock(&LOCK_mdl);
-
- if (!(lock= (MDL_lock*) my_hash_search(&mdl_locks,
- key->ptr(), key->length())))
- {
- ticket= MDL_ticket::create(this, mdl_request->type);
- lock= MDL_lock::create(key);
- if (!ticket || !lock || my_hash_insert(&mdl_locks, (uchar*)lock))
- {
- MDL_ticket::destroy(ticket);
- MDL_lock::destroy(lock);
- pthread_mutex_unlock(&LOCK_mdl);
- return TRUE;
- }
- mdl_request->ticket= ticket;
- lock->type= MDL_lock::MDL_LOCK_EXCLUSIVE;
- lock->granted.push_front(ticket);
- m_tickets.push_front(ticket);
- ticket->m_state= MDL_ACQUIRED;
- ticket->m_lock= lock;
- global_lock.active_intention_exclusive++;
- }
- pthread_mutex_unlock(&LOCK_mdl);
- return FALSE;
+ return try_acquire_lock_impl(mdl_request);
}
@@ -1272,46 +1813,32 @@ MDL_context::try_acquire_exclusive_lock(MDL_request *mdl_request)
bool MDL_context::acquire_global_shared_lock()
{
- st_my_thread_var *mysys_var= my_thread_var;
- const char *old_msg;
+ MDL_request mdl_request;
- safe_mutex_assert_not_owner(&LOCK_open);
- DBUG_ASSERT(!m_has_global_shared_lock);
+ DBUG_ASSERT(! is_global_lock_owner(MDL_SHARED));
- pthread_mutex_lock(&LOCK_mdl);
+ mdl_request.init(MDL_key::GLOBAL, "", "", MDL_SHARED);
- global_lock.waiting_shared++;
- old_msg= MDL_ENTER_COND(m_thd, mysys_var);
+ if (acquire_lock_impl(&mdl_request))
+ return TRUE;
- while (!mysys_var->abort && global_lock.active_intention_exclusive)
- pthread_cond_wait(&COND_mdl, &LOCK_mdl);
+ move_ticket_after_lt_or_ha_sentinel(mdl_request.ticket);
- global_lock.waiting_shared--;
- if (mysys_var->abort)
- {
- /* As a side-effect MDL_EXIT_COND() unlocks LOCK_mdl. */
- MDL_EXIT_COND(m_thd, mysys_var, old_msg);
- return TRUE;
- }
- global_lock.active_shared++;
- m_has_global_shared_lock= TRUE;
- /* As a side-effect MDL_EXIT_COND() unlocks LOCK_mdl. */
- MDL_EXIT_COND(m_thd, mysys_var, old_msg);
return FALSE;
}
/**
- Check if there are any pending exclusive locks which conflict
- with shared locks held by this thread.
-
- @pre The caller already has acquired LOCK_mdl.
+ Implement a simple deadlock detection heuristic: check if there
+ are any pending exclusive locks which conflict with shared locks
+ held by this thread. In that case waiting can be circular,
+ i.e. lead to a deadlock.
@return TRUE If there are any pending conflicting locks.
FALSE Otherwise.
*/
-bool MDL_context::can_wait_lead_to_deadlock_impl() const
+bool MDL_context::can_wait_lead_to_deadlock() const
{
Ticket_iterator ticket_it(m_tickets);
MDL_ticket *ticket;
@@ -1323,12 +1850,12 @@ bool MDL_context::can_wait_lead_to_deadlock_impl() const
upgradeable shared metadata locks.
Otherwise we would also have to check for the presence of pending
requests for conflicting types of global lock.
- In addition MDL_ticket::has_pending_conflicting_lock_impl()
+ In addition MDL_ticket::has_pending_conflicting_lock()
won't work properly for exclusive type of lock.
*/
DBUG_ASSERT(! ticket->is_upgradable_or_exclusive());
- if (ticket->has_pending_conflicting_lock_impl())
+ if (ticket->has_pending_conflicting_lock())
return TRUE;
}
return FALSE;
@@ -1336,25 +1863,6 @@ bool MDL_context::can_wait_lead_to_deadlock_impl() const
/**
- Implement a simple deadlock detection heuristic: check if there
- are any pending exclusive locks which conflict with shared locks
- held by this thread. In that case waiting can be circular,
- i.e. lead to a deadlock.
-
- @return TRUE if there are any conflicting locks, FALSE otherwise.
-*/
-
-bool MDL_context::can_wait_lead_to_deadlock() const
-{
- bool result;
- pthread_mutex_lock(&LOCK_mdl);
- result= can_wait_lead_to_deadlock_impl();
- pthread_mutex_unlock(&LOCK_mdl);
- return result;
-}
-
-
-/**
Wait until there will be no locks that conflict with lock requests
in the given list.
@@ -1391,8 +1899,6 @@ MDL_context::wait_for_locks(MDL_request_list *mdl_requests)
COND_mdl because of above scenario.
*/
mysql_ha_flush(m_thd);
- pthread_mutex_lock(&LOCK_mdl);
- old_msg= MDL_ENTER_COND(m_thd, mysys_var);
/*
In cases when we wait while still holding some metadata
@@ -1406,9 +1912,8 @@ MDL_context::wait_for_locks(MDL_request_list *mdl_requests)
negatives) in situations when conflicts are rare (in our
case this is true since DDL statements should be rare).
*/
- if (can_wait_lead_to_deadlock_impl())
+ if (can_wait_lead_to_deadlock())
{
- MDL_EXIT_COND(m_thd, mysys_var, old_msg);
my_error(ER_LOCK_DEADLOCK, MYF(0));
return TRUE;
}
@@ -1418,83 +1923,104 @@ MDL_context::wait_for_locks(MDL_request_list *mdl_requests)
{
MDL_key *key= &mdl_request->key;
DBUG_ASSERT(mdl_request->ticket == NULL);
- if (!global_lock.is_lock_type_compatible(mdl_request->type, FALSE))
- break;
+
/*
To avoid starvation we don't wait if we have a conflict against
request for MDL_EXCLUSIVE lock.
*/
- if (mdl_request->is_shared() &&
- (lock= (MDL_lock*) my_hash_search(&mdl_locks, key->ptr(),
- key->length())) &&
- !lock->can_grant_lock(this, mdl_request->type, FALSE))
+ if (mdl_request->is_shared() ||
+ mdl_request->type == MDL_INTENTION_EXCLUSIVE)
+ {
+ /* The below call also implicitly locks MDL_lock::m_mutex. */
+ if (! (lock= mdl_locks.find(key)))
+ continue;
+
+ if (lock->can_grant_lock(this, mdl_request->type, FALSE))
+ {
+ pthread_mutex_unlock(&lock->m_mutex);
+ continue;
+ }
+
+ MDL_ticket *pending_ticket;
+ if (! (pending_ticket= MDL_ticket::create(this, mdl_request->type)))
+ {
+ pthread_mutex_unlock(&lock->m_mutex);
+ return TRUE;
+ }
+ if (mdl_request->is_shared())
+ lock->waiting_shared.push_front(pending_ticket);
+ else
+ lock->waiting_exclusive.push_front(pending_ticket);
+
+ old_msg= MDL_ENTER_COND(m_thd, mysys_var, &m_ctx_wakeup_cond,
+ &lock->m_mutex);
+
+ pthread_cond_wait(&m_ctx_wakeup_cond, &lock->m_mutex);
+
+ /*
+ We have to do MDL_EXIT_COND here and then re-acquire the lock
+ as there is a chance that we will destroy MDL_lock object and
+ won't be able to call MDL_EXIT_COND after it.
+ */
+ MDL_EXIT_COND(m_thd, mysys_var, &lock->m_mutex, old_msg);
+
+ pthread_mutex_lock(&lock->m_mutex);
+ if (mdl_request->is_shared())
+ lock->waiting_shared.remove(pending_ticket);
+ else
+ lock->waiting_exclusive.remove(pending_ticket);
+ if (lock->is_empty())
+ mdl_locks.remove(lock);
+ else
+ pthread_mutex_unlock(&lock->m_mutex);
+ MDL_ticket::destroy(pending_ticket);
break;
+ }
}
if (!mdl_request)
{
- /* As a side-effect MDL_EXIT_COND() unlocks LOCK_mdl. */
- MDL_EXIT_COND(m_thd, mysys_var, old_msg);
+ /* There are no conflicts for any locks! */
break;
}
- m_is_waiting_in_mdl= TRUE;
- pthread_cond_wait(&COND_mdl, &LOCK_mdl);
- m_is_waiting_in_mdl= FALSE;
- /* As a side-effect MDL_EXIT_COND() unlocks LOCK_mdl. */
- MDL_EXIT_COND(m_thd, mysys_var, old_msg);
}
return mysys_var->abort;
}
/**
- Auxiliary function which allows to release particular lock
- ownership of which is represented by a lock ticket object.
+ Release lock.
+
+ @param ticket Ticket for lock to be released.
*/
-void MDL_context::release_ticket(MDL_ticket *ticket)
+void MDL_context::release_lock(MDL_ticket *ticket)
{
MDL_lock *lock= ticket->m_lock;
- DBUG_ENTER("release_ticket");
+ DBUG_ENTER("MDL_context::release_lock");
DBUG_PRINT("enter", ("db=%s name=%s", lock->key.db_name(),
lock->key.name()));
- safe_mutex_assert_owner(&LOCK_mdl);
+ DBUG_ASSERT(this == ticket->m_ctx);
+ safe_mutex_assert_not_owner(&LOCK_open);
if (ticket == m_lt_or_ha_sentinel)
m_lt_or_ha_sentinel= ++Ticket_list::Iterator(m_tickets, ticket);
- m_tickets.remove(ticket);
+ pthread_mutex_lock(&lock->m_mutex);
- switch (ticket->m_type)
- {
- case MDL_SHARED_UPGRADABLE:
- global_lock.active_intention_exclusive--;
- /* Fallthrough. */
- case MDL_SHARED:
- case MDL_SHARED_HIGH_PRIO:
- lock->granted.remove(ticket);
- break;
- case MDL_EXCLUSIVE:
- lock->type= MDL_lock::MDL_LOCK_SHARED;
- lock->granted.remove(ticket);
- global_lock.active_intention_exclusive--;
- break;
- default:
- DBUG_ASSERT(0);
- }
-
- MDL_ticket::destroy(ticket);
+ lock->granted.remove(ticket);
if (lock->is_empty())
+ mdl_locks.remove(lock);
+ else
{
- my_hash_delete(&mdl_locks, (uchar *)lock);
- DBUG_PRINT("info", ("releasing cached_object cached_object=%p",
- lock->cached_object));
- if (lock->cached_object)
- (*lock->cached_object_release_hook)(lock->cached_object);
- MDL_lock::destroy(lock);
+ lock->wake_up_waiters();
+ pthread_mutex_unlock(&lock->m_mutex);
}
+ m_tickets.remove(ticket);
+ MDL_ticket::destroy(ticket);
+
DBUG_VOID_RETURN;
}
@@ -1522,44 +2048,26 @@ void MDL_context::release_locks_stored_before(MDL_ticket *sentinel)
Ticket_iterator it(m_tickets);
DBUG_ENTER("MDL_context::release_locks_stored_before");
- safe_mutex_assert_not_owner(&LOCK_open);
-
if (m_tickets.is_empty())
DBUG_VOID_RETURN;
- pthread_mutex_lock(&LOCK_mdl);
while ((ticket= it++) && ticket != sentinel)
{
DBUG_PRINT("info", ("found lock to release ticket=%p", ticket));
- release_ticket(ticket);
+ release_lock(ticket);
}
- /* Inefficient but will do for a while */
- pthread_cond_broadcast(&COND_mdl);
- pthread_mutex_unlock(&LOCK_mdl);
+ /*
+ If all locks were released, then the sentinel was not present
+ in the list. It must never happen because the sentinel was
+ bogus, i.e. pointed to a ticket that no longer exists.
+ */
+ DBUG_ASSERT(! m_tickets.is_empty() || sentinel == NULL);
DBUG_VOID_RETURN;
}
/**
- Release a lock.
-
- @param ticket Lock to be released
-*/
-
-void MDL_context::release_lock(MDL_ticket *ticket)
-{
- DBUG_ASSERT(this == ticket->m_ctx);
- safe_mutex_assert_not_owner(&LOCK_open);
-
- pthread_mutex_lock(&LOCK_mdl);
- release_ticket(ticket);
- pthread_cond_broadcast(&COND_mdl);
- pthread_mutex_unlock(&LOCK_mdl);
-}
-
-
-/**
Release all locks in the context which correspond to the same name/
object as this lock request.
@@ -1569,7 +2077,7 @@ void MDL_context::release_lock(MDL_ticket *ticket)
void MDL_context::release_all_locks_for_name(MDL_ticket *name)
{
- /* Use MDL_ticket::lock to identify other locks for the same object. */
+ /* Use MDL_ticket::m_lock to identify other locks for the same object. */
MDL_lock *lock= name->m_lock;
/* Remove matching lock tickets from the context. */
@@ -1600,11 +2108,18 @@ void MDL_ticket::downgrade_exclusive_lock()
if (is_shared())
return;
- pthread_mutex_lock(&LOCK_mdl);
- m_lock->type= MDL_lock::MDL_LOCK_SHARED;
+ pthread_mutex_lock(&m_lock->m_mutex);
m_type= MDL_SHARED_UPGRADABLE;
- pthread_cond_broadcast(&COND_mdl);
- pthread_mutex_unlock(&LOCK_mdl);
+
+ if (! m_lock->waiting_shared.is_empty())
+ {
+ MDL_lock::Ticket_iterator it(m_lock->waiting_shared);
+ MDL_ticket *ticket;
+ while ((ticket= it++))
+ ticket->get_ctx()->awake();
+ }
+
+ pthread_mutex_unlock(&m_lock->m_mutex);
}
@@ -1614,14 +2129,22 @@ void MDL_ticket::downgrade_exclusive_lock()
void MDL_context::release_global_shared_lock()
{
+ MDL_request mdl_request;
+ MDL_ticket *ticket;
+ bool not_used;
+
+ mdl_request.init(MDL_key::GLOBAL, "", "", MDL_SHARED);
+
safe_mutex_assert_not_owner(&LOCK_open);
- DBUG_ASSERT(m_has_global_shared_lock);
- pthread_mutex_lock(&LOCK_mdl);
- global_lock.active_shared--;
- m_has_global_shared_lock= FALSE;
- pthread_cond_broadcast(&COND_mdl);
- pthread_mutex_unlock(&LOCK_mdl);
+ /*
+ TODO/QQ/FIXME: In theory we always should be able to find
+ ticket here. But in practice this is not
+ always TRUE.
+ */
+
+ if ((ticket= find_ticket(&mdl_request, &not_used)))
+ release_lock(ticket);
}
@@ -1687,40 +2210,16 @@ MDL_context::is_lock_owner(MDL_key::enum_mdl_namespace mdl_namespace,
existing shared lock.
@pre The ticket must match an acquired lock.
- @pre The caller already has acquired LOCK_mdl.
@return TRUE if there is a conflicting lock request, FALSE otherwise.
*/
-bool MDL_ticket::has_pending_conflicting_lock_impl() const
-{
- DBUG_ASSERT(is_shared());
- safe_mutex_assert_owner(&LOCK_mdl);
-
- return !m_lock->waiting.is_empty();
-}
-
-
-/**
- Check if we have any pending exclusive locks which conflict with
- existing shared lock.
-
- @pre The ticket must match an acquired lock.
-
- @return TRUE if there is a pending conflicting lock request,
- FALSE otherwise.
-*/
-
bool MDL_ticket::has_pending_conflicting_lock() const
{
- bool result;
-
safe_mutex_assert_not_owner(&LOCK_open);
+ DBUG_ASSERT(is_shared());
- pthread_mutex_lock(&LOCK_mdl);
- result= has_pending_conflicting_lock_impl();
- pthread_mutex_unlock(&LOCK_mdl);
- return result;
+ return m_lock->has_pending_exclusive_lock();
}
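
Finally, the heuristic behind can_wait_lead_to_deadlock() / has_pending_conflicting_lock() above: before a connection starts waiting, it checks whether any shared lock it already holds has a pending exclusive request, because that waiter might in turn be waiting for us. False positives are accepted. A minimal sketch with hypothetical names (SimpleLock, Context, wait_may_deadlock), illustrative only and not part of the patch:

/* Illustrative sketch only -- not part of the patch. */
#include <list>

struct SimpleLock
{
  int pending_exclusive;              /* number of queued exclusive requests */
};

struct Context
{
  std::list<SimpleLock*> shared_locks_held;

  bool wait_may_deadlock() const
  {
    for (std::list<SimpleLock*>::const_iterator it= shared_locks_held.begin();
         it != shared_locks_held.end(); ++it)
      if ((*it)->pending_exclusive > 0)
        return true;                  /* that waiter may be waiting for us */
    return false;
  }
};
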