author | Konstantin Osipov <kostja@sun.com> | 2010-08-09 22:33:47 +0400 |
---|---|---|
committer | Konstantin Osipov <kostja@sun.com> | 2010-08-09 22:33:47 +0400 |
commit | 523066987d6150347b3a56d403187312816cab8d (patch) | |
tree | 220bf71a69ee638da7cfd2c542d73122422a8c2f /sql/ha_ndbcluster.cc | |
parent | b1207bf1b83270e7440755d75fa549b480b56f82 (diff) | |
download | mariadb-git-523066987d6150347b3a56d403187312816cab8d.tar.gz |
A fix for Bug#41158 "DROP TABLE holds LOCK_open during unlink()".
Remove acquisition of LOCK_open around file system operations,
since such operations are now protected by metadata locks.
Rework table discovery algorithm to not require LOCK_open.
No new tests are added, since all MDL locking operations
are covered in lock.test and mdl_sync.test; as long as these
tests pass despite the increased concurrency, consistency
must be unaffected.
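
The locking pattern behind this change can be modelled outside the server. The sketch below is plain standalone C++, not MySQL code: MetadataLocks, drop_table_file() and the file paths are invented stand-ins for the MDL subsystem, the DROP TABLE path and the on-disk files. It only illustrates the point that a slow unlink() no longer blocks every session behind one global mutex, but only sessions that want the same table.

```cpp
// Standalone model of the change: no process-wide mutex (the LOCK_open
// analogue) is held across the file-system call; each table name is
// serialized by its own exclusive lock (the metadata-lock analogue).
#include <cstdio>
#include <map>
#include <mutex>
#include <string>

class MetadataLocks {                       // stand-in for the MDL subsystem
public:
  std::mutex &exclusive(const std::string &table) {
    std::lock_guard<std::mutex> g(registry_mutex_);
    return per_table_[table];               // one lock object per table name
  }
private:
  std::mutex registry_mutex_;
  std::map<std::string, std::mutex> per_table_;
};

static MetadataLocks mdl;

// Remove a table's on-disk file while holding only that table's exclusive
// "metadata lock"; the potentially slow remove() runs outside any global lock.
static bool drop_table_file(const std::string &table, const std::string &path) {
  std::lock_guard<std::mutex> x_lock(mdl.exclusive(table));
  return std::remove(path.c_str()) == 0;
}

int main() {
  // Drops of different tables no longer serialize on a single mutex.
  drop_table_file("db1.t1", "/tmp/db1_t1.frm");
  drop_table_file("db2.t2", "/tmp/db2_t2.frm");
  return 0;
}
```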
mysql-test/t/disabled.def:
Disable NDB tests due to Bug#55799.
sql/datadict.cc:
No longer necessary to protect ha_create_table() with
LOCK_open. Serial execution is now ensured by metadata
locks.
sql/ha_ndbcluster.cc:
Do not manipulate LOCK_open in cluster code.
sql/ha_ndbcluster_binlog.cc:
Do not manipulate LOCK_open in cluster code.
sql/ha_ndbcluster_binlog.h:
Update function signature.
sql/handler.cc:
Implement ha_check_if_table_exists().
@todo: some engines provide the ha_table_exists_in_engine()
handlerton call; for those we perhaps shouldn't
call ha_discover(), to be more efficient.
Since currently it's only NDB, postpone until
integration with NDB.
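
The intended flow can be sketched as follows. This is not the actual sql/handler.cc implementation; table_exists(), the .frm path handling and the discovery callback are simplified stand-ins. Existence is decided by the .frm on disk first, with engine discovery as the fallback, which is also where the @todo above would short-circuit via a cheaper per-engine existence check.

```cpp
// Flow sketch only: .frm on disk wins, otherwise ask the engine(s) to
// discover the table. Names and layout are invented for illustration.
#include <functional>
#include <string>
#include <sys/stat.h>

static bool table_exists(const std::string &frm_path,
                         const std::function<bool()> &discover_in_engine) {
  struct stat st;
  if (stat(frm_path.c_str(), &st) == 0)
    return true;                        // table definition present on disk
  // The @todo applies here: an engine with a cheap "exists" hook could be
  // asked directly instead of running full discovery (today only NDB
  // supports discovery at all).
  return discover_in_engine();
}

int main() {
  // No .frm, and the engine does not know the table either: it does not exist.
  bool exists = table_exists("./test_t1.frm", [] { return false; });
  return exists ? 1 : 0;
}
```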
sql/handler.h:
Declare ha_check_if_table_exists() function.
sql/mdl.cc:
Remove an obsolete comment.
sql/sql_base.cc:
Update to the new signature of close_cached_tables():
from now on we always call it without LOCK_open.
Update comments.
Remove get_table_share_with_create(); we should
not attempt to create a table under LOCK_open.
Introduce get_table_share_with_discover() instead,
which requests a back-off action if the table
exists in the engine.
Remove acquisition of LOCK_open for
data dictionary operations, such as check_if_table_exists().
Do not use get_table_share_with_create/discover for views,
where it's not needed.
Make tdc_remove_table() optionally acquire LOCK_open
to simplify usage of this function.
Use the right mutex in the partitioning code when
manipulating thd->open_tables.
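
The back-off contract of the new helper can be sketched like this. The names and types below are invented and do not match sql_base.cc; they only show the contract: when no definition is found locally but the engine still knows the table, the caller is told to release what it holds and retry, so discovery can run without LOCK_open.

```cpp
// Sketch of the back-off protocol with invented types, not server code.
#include <optional>
#include <string>

struct TableShare { std::string name; };

enum class ShareResult {
  FOUND,      // share available, proceed
  NOT_FOUND,  // table exists nowhere, report the usual error
  BACK_OFF    // exists only in the engine: back off, discover, then retry
};

static ShareResult get_share_with_discover(
    const std::optional<TableShare> &cached_or_on_disk,
    bool exists_in_engine,
    TableShare *out) {
  if (cached_or_on_disk) {
    *out = *cached_or_on_disk;
    return ShareResult::FOUND;
  }
  return exists_in_engine ? ShareResult::BACK_OFF : ShareResult::NOT_FOUND;
}

int main() {
  TableShare share;
  // No local definition, but the cluster reports the table: the caller
  // must back off, let discovery recreate the .frm, and open again.
  ShareResult r = get_share_with_discover(std::nullopt,
                                          /*exists_in_engine=*/true, &share);
  return r == ShareResult::BACK_OFF ? 0 : 1;
}
```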
sql/sql_base.h:
Update signatures of the changed functions.
sql/sql_insert.cc:
Do not wrap quick_rm_table() in LOCK_open acquisition;
it is unnecessary.
sql/sql_parse.cc:
Update to the new calling convention of tdc_remove_table().
Update to the new signature of close_cached_tables().
Update comments.
sql/sql_rename.cc:
Update to the new calling convention of tdc_remove_table().
Remove acquisition of LOCK_open around filesystem
operations.
sql/sql_show.cc:
Remove get_trigger_table_impl().
Do not acquire LOCK_open for a dirty read of the trigger
file.
sql/sql_table.cc:
Do not acquire LOCK_open for filesystem operations.
sql/sql_trigger.cc:
Do not require LOCK_open for trigger file I/O.
sql/sql_truncate.cc:
Update to the new signature of tdc_remove_table().
sql/sql_view.cc:
Do not require LOCK_open for view I/O.
Use tdc_remove_table() to expel view share.
Update comments.
sql/sys_vars.cc:
Update to the new signature of close_cached_tables().
Diffstat (limited to 'sql/ha_ndbcluster.cc')
-rw-r--r-- | sql/ha_ndbcluster.cc | 29 |
1 file changed, 8 insertions, 21 deletions
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index d4a98265c49..0ec2e21056e 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -680,7 +680,7 @@ int ha_ndbcluster::ndb_err(NdbTransaction *trans)
       bzero((char*) &table_list,sizeof(table_list));
       table_list.db= m_dbname;
       table_list.alias= table_list.table_name= m_tabname;
-      close_cached_tables(thd, &table_list, FALSE, FALSE);
+      close_cached_tables(thd, &table_list, FALSE);
       break;
     }
     default:
@@ -5702,7 +5702,7 @@ int ha_ndbcluster::create(const char *name,
                                m_table->getObjectVersion(),
                                (is_truncate) ?
                                SOT_TRUNCATE_TABLE : SOT_CREATE_TABLE,
-                               0, 0, 1);
+                               0, 0);
       break;
     }
   }
@@ -6143,7 +6143,7 @@ int ha_ndbcluster::rename_table(const char *from, const char *to)
                              old_dbname, m_tabname,
                              ndb_table_id, ndb_table_version,
                              SOT_RENAME_TABLE,
-                             m_dbname, new_tabname, 1);
+                             m_dbname, new_tabname);
   }
 
   // If we are moving tables between databases, we need to recreate
@@ -6337,7 +6337,7 @@ retry_temporary_error1:
                              thd->query(), thd->query_length(),
                              share->db, share->table_name,
                              ndb_table_id, ndb_table_version,
-                             SOT_DROP_TABLE, 0, 0, 1);
+                             SOT_DROP_TABLE, 0, 0);
   }
   else if (table_dropped && share && share->op) /* ndbcluster_log_schema_op
                                                    will do a force GCP */
@@ -7019,7 +7019,6 @@ int ndbcluster_drop_database_impl(const char *path)
   while ((tabname=it++))
   {
     tablename_to_filename(tabname, tmp, FN_REFLEN - (tmp - full_path)-1);
-    mysql_mutex_lock(&LOCK_open);
     if (ha_ndbcluster::delete_table(0, ndb, full_path, dbname, tabname))
     {
       const NdbError err= dict->getNdbError();
@@ -7029,7 +7028,6 @@ int ndbcluster_drop_database_impl(const char *path)
         ret= ndb_to_mysql_error(&err);
       }
     }
-    mysql_mutex_unlock(&LOCK_open);
   }
   DBUG_RETURN(ret);
 }
@@ -7056,7 +7054,7 @@ static void ndbcluster_drop_database(handlerton *hton, char *path)
   ha_ndbcluster::set_dbname(path, db);
   ndbcluster_log_schema_op(thd, 0,
                            thd->query(), thd->query_length(),
-                           db, "", 0, 0, SOT_DROP_DB, 0, 0, 0);
+                           db, "", 0, 0, SOT_DROP_DB, 0, 0);
 #endif
   DBUG_VOID_RETURN;
 }
@@ -7181,7 +7179,6 @@ int ndbcluster_find_all_files(THD *thd)
       my_free(data);
       my_free(pack_data);
 
-      mysql_mutex_lock(&LOCK_open);
       if (discover)
       {
         /* ToDo 4.1 database needs to be created if missing */
@@ -7199,7 +7196,6 @@ int ndbcluster_find_all_files(THD *thd)
                                        TRUE);
       }
 #endif
-      mysql_mutex_unlock(&LOCK_open);
     }
   }
   while (unhandled && retries);
@@ -7292,19 +7288,16 @@ int ndbcluster_find_files(handlerton *hton, THD *thd,
                            file_name->str, reg_ext, 0);
       if (my_access(name, F_OK))
       {
-        mysql_mutex_lock(&LOCK_open);
         DBUG_PRINT("info", ("Table %s listed and need discovery",
                             file_name->str));
         if (ndb_create_table_from_engine(thd, db, file_name->str))
         {
-          mysql_mutex_unlock(&LOCK_open);
           push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
                               ER_TABLE_EXISTS_ERROR,
                               "Discover of table %s.%s failed",
                               db, file_name->str);
           continue;
         }
-        mysql_mutex_unlock(&LOCK_open);
       }
       DBUG_PRINT("info", ("%s existed in NDB _and_ on disk ", file_name->str));
       file_on_disk= TRUE;
@@ -7361,10 +7354,8 @@ int ndbcluster_find_files(handlerton *hton, THD *thd,
       file_name_str= (char*)my_hash_element(&ok_tables, i);
       end= end1 +
         tablename_to_filename(file_name_str, end1, sizeof(name) - (end1 - name));
-      mysql_mutex_lock(&LOCK_open);
       ndbcluster_create_binlog_setup(ndb, name, end-name,
                                      db, file_name_str, TRUE);
-      mysql_mutex_unlock(&LOCK_open);
     }
   }
 #endif
@@ -7426,7 +7417,6 @@ int ndbcluster_find_files(handlerton *hton, THD *thd,
   }
   /* Lock mutex before creating .FRM files. */
-  mysql_mutex_lock(&LOCK_open);
   /* Create new files.
   */
   List_iterator_fast<char> it2(create_list);
   while ((file_name_str=it2++))
@@ -7441,8 +7431,6 @@ int ndbcluster_find_files(handlerton *hton, THD *thd,
     }
   }
 
-  mysql_mutex_unlock(&LOCK_open);
-
   my_hash_free(&ok_tables);
   my_hash_free(&ndb_tables);
 
@@ -8452,8 +8440,7 @@ int handle_trailing_share(NDB_SHARE *share)
   bzero((char*) &table_list,sizeof(table_list));
   table_list.db= share->db;
   table_list.alias= table_list.table_name= share->table_name;
-  mysql_mutex_assert_owner(&LOCK_open);
-  close_cached_tables(thd, &table_list, TRUE, FALSE);
+  close_cached_tables(thd, &table_list, FALSE);
 
   mysql_mutex_lock(&ndbcluster_mutex);
   /* ndb_share reference temporary free */
@@ -10612,13 +10599,13 @@ int ndbcluster_alter_tablespace(handlerton *hton,
                              thd->query(), thd->query_length(),
                              "", alter_info->tablespace_name,
                              0, 0,
-                             SOT_TABLESPACE, 0, 0, 0);
+                             SOT_TABLESPACE, 0, 0);
   else
     ndbcluster_log_schema_op(thd, 0,
                              thd->query(), thd->query_length(),
                              "", alter_info->logfile_group_name,
                              0, 0,
-                             SOT_LOGFILE_GROUP, 0, 0, 0);
+                             SOT_LOGFILE_GROUP, 0, 0);
 #endif
   DBUG_RETURN(FALSE);