author     unknown <tomas@poseidon.ndb.mysql.com>    2006-04-10 16:08:40 +0200
committer  unknown <tomas@poseidon.ndb.mysql.com>    2006-04-10 16:08:40 +0200
commit     4618e6eccdea7d7008e70670e3326c4e3c5891b7 (patch)
tree       79dfa9c29837453cf85cb459a33201d0e03808e7 /sql/ha_ndbcluster.cc
parent     ebf409a8be161ad947065983ec3406c6d6bce306 (diff)
Bug #18932: Cluster binlog mysqld accepts updating although binlog not setup
sql/ha_ndbcluster.cc:
  Bug #18932: Cluster binlog mysqld accepts updating although binlog not setup
  - put ndb tables in read only until binlog is setup properly
sql/ha_ndbcluster_binlog.cc:
  Bug #18932: Cluster binlog mysqld accepts updating although binlog not setup
  - put ndb tables in read only until binlog is setup properly
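
To illustrate the approach the commit message describes, here is a minimal standalone C++ sketch of the guard pattern, not the actual MySQL/NDB handler code: DDL entry points refuse work until schema distribution is set up, and opened tables are flagged read only while the binlog is running but not yet initialised. All names in the sketch (schema_share_ready, binlog_ready, create_table, open_table, ERR_NO_CONNECTION, DbStat) are illustrative stand-ins for the real server symbols (schema_share, ndb_binlog_tables_inited, ndb_binlog_running, HA_ERR_NO_CONNECTION, table->db_stat), which appear in the hunks below.

// Minimal standalone sketch of the guard pattern this commit applies;
// the flags and functions below are illustrative stand-ins, not the
// real server symbols.
#include <cstdio>

static bool schema_share_ready= false;   // stand-in for schema_share != 0
static bool binlog_ready= false;         // stand-in for ndb_binlog_tables_inited
static bool binlog_running= true;        // stand-in for ndb_binlog_running

enum { ERR_NO_CONNECTION= 1 };           // stand-in for HA_ERR_NO_CONNECTION
enum DbStat { WRITABLE, READ_ONLY };     // stand-in for table->db_stat flags

// DDL entry point (create/drop): refused until schema distribution is set up.
static int create_table(const char *name)
{
  if (!schema_share_ready)
  {
    std::printf("refusing CREATE %s: schema distribution table not setup\n", name);
    return ERR_NO_CONNECTION;
  }
  std::printf("created %s\n", name);
  return 0;
}

// Open path: succeeds, but the table is marked read only while the binlog
// is running and its setup has not finished.
static DbStat open_table(const char *name)
{
  DbStat stat= WRITABLE;
  if (!binlog_ready && binlog_running)
    stat= READ_ONLY;
  std::printf("opened %s (%s)\n", name,
              stat == READ_ONLY ? "read only" : "writable");
  return stat;
}

int main()
{
  create_table("t1");                      // refused, setup not done yet
  open_table("t1");                        // read only
  schema_share_ready= binlog_ready= true;  // binlog setup completes
  create_table("t1");                      // now allowed
  open_table("t1");                        // now writable
  return 0;
}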
Diffstat (limited to 'sql/ha_ndbcluster.cc')
-rw-r--r--  sql/ha_ndbcluster.cc  93
1 file changed, 73 insertions(+), 20 deletions(-)
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index a4193e4eb33..570ac48238c 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -4480,6 +4480,21 @@ int ha_ndbcluster::create(const char *name,
DBUG_RETURN(my_errno);
}
+#ifdef HAVE_NDB_BINLOG
+ /*
+ Don't allow table creation unless
+ schema distribution table is setup
+ ( unless it is a creation of the schema dist table itself )
+ */
+ if (!schema_share &&
+ !(strcmp(m_dbname, NDB_REP_DB) == 0 &&
+ strcmp(m_tabname, NDB_SCHEMA_TABLE) == 0))
+ {
+ DBUG_PRINT("info", ("Schema distribution table not setup"));
+ DBUG_RETURN(HA_ERR_NO_CONNECTION);
+ }
+#endif /* HAVE_NDB_BINLOG */
+
DBUG_PRINT("table", ("name: %s", m_tabname));
tab.setName(m_tabname);
tab.setLogging(!(info->options & HA_LEX_CREATE_TMP_TABLE));
@@ -5004,7 +5019,8 @@ int ha_ndbcluster::rename_table(const char *from, const char *to)
is_old_table_tmpfile= 0;
String event_name(INJECTOR_EVENT_LEN);
ndb_rep_event_name(&event_name, from + sizeof(share_prefix) - 1, 0);
- ndbcluster_handle_drop_table(ndb, event_name.c_ptr(), share);
+ ndbcluster_handle_drop_table(ndb, event_name.c_ptr(), share,
+ "rename table");
}
if (!result && !IS_TMP_PREFIX(new_tabname))
@@ -5088,6 +5104,15 @@ ha_ndbcluster::delete_table(ha_ndbcluster *h, Ndb *ndb,
DBUG_ENTER("ha_ndbcluster::ndbcluster_delete_table");
NDBDICT *dict= ndb->getDictionary();
#ifdef HAVE_NDB_BINLOG
+ /*
+ Don't allow drop table unless
+ schema distribution table is setup
+ */
+ if (!schema_share)
+ {
+ DBUG_PRINT("info", ("Schema distribution table not setup"));
+ DBUG_RETURN(HA_ERR_NO_CONNECTION);
+ }
NDB_SHARE *share= get_share(path, 0, false);
#endif
@@ -5156,7 +5181,7 @@ ha_ndbcluster::delete_table(ha_ndbcluster *h, Ndb *ndb,
ndb_rep_event_name(&event_name, path + sizeof(share_prefix) - 1, 0);
ndbcluster_handle_drop_table(ndb,
table_dropped ? event_name.c_ptr() : 0,
- share);
+ share, "delete table");
}
if (share)
@@ -5185,6 +5210,18 @@ int ha_ndbcluster::delete_table(const char *name)
set_dbname(name);
set_tabname(name);
+#ifdef HAVE_NDB_BINLOG
+ /*
+ Don't allow drop table unless
+ schema distribution table is setup
+ */
+ if (!schema_share)
+ {
+ DBUG_PRINT("info", ("Schema distribution table not setup"));
+ DBUG_RETURN(HA_ERR_NO_CONNECTION);
+ }
+#endif
+
if (check_ndb_connection())
DBUG_RETURN(HA_ERR_NO_CONNECTION);
@@ -5406,6 +5443,11 @@ int ha_ndbcluster::open(const char *name, int mode, uint test_if_locked)
if (!res)
info(HA_STATUS_VARIABLE | HA_STATUS_CONST);
+#ifdef HAVE_NDB_BINLOG
+ if (!ndb_binlog_tables_inited && ndb_binlog_running)
+ table->db_stat|= HA_READ_ONLY;
+#endif
+
DBUG_RETURN(res);
}
@@ -5704,6 +5746,19 @@ int ndbcluster_drop_database_impl(const char *path)
static void ndbcluster_drop_database(char *path)
{
+ DBUG_ENTER("ndbcluster_drop_database");
+#ifdef HAVE_NDB_BINLOG
+ /*
+ Don't allow drop database unless
+ schema distribution table is setup
+ */
+ if (!schema_share)
+ {
+ DBUG_PRINT("info", ("Schema distribution table not setup"));
+ DBUG_VOID_RETURN;
+ //DBUG_RETURN(HA_ERR_NO_CONNECTION);
+ }
+#endif
ndbcluster_drop_database_impl(path);
#ifdef HAVE_NDB_BINLOG
char db[FN_REFLEN];
@@ -5712,6 +5767,7 @@ static void ndbcluster_drop_database(char *path)
current_thd->query, current_thd->query_length,
db, "", 0, 0, SOT_DROP_DB);
#endif
+ DBUG_VOID_RETURN;
}
/*
find all tables in ndb and discover those needed
@@ -5740,29 +5796,30 @@ int ndbcluster_find_all_files(THD *thd)
NDBDICT *dict= ndb->getDictionary();
- int unhandled, retries= 5;
+ int unhandled, retries= 5, skipped;
do
{
if (dict->listObjects(list, NdbDictionary::Object::UserTable) != 0)
ERR_RETURN(dict->getNdbError());
unhandled= 0;
+ skipped= 0;
+ retries--;
for (uint i= 0 ; i < list.count ; i++)
{
NDBDICT::List::Element& elmt= list.elements[i];
- int do_handle_table= 0;
if (IS_TMP_PREFIX(elmt.name) || IS_NDB_BLOB_PREFIX(elmt.name))
{
DBUG_PRINT("info", ("Skipping %s.%s in NDB", elmt.database, elmt.name));
continue;
}
DBUG_PRINT("info", ("Found %s.%s in NDB", elmt.database, elmt.name));
- if (elmt.state == NDBOBJ::StateOnline ||
- elmt.state == NDBOBJ::StateBackup)
- do_handle_table= 1;
- else if (!(elmt.state == NDBOBJ::StateBuilding))
+ if (elmt.state != NDBOBJ::StateOnline &&
+ elmt.state != NDBOBJ::StateBackup &&
+ elmt.state != NDBOBJ::StateBuilding)
{
sql_print_information("NDB: skipping setup table %s.%s, in state %d",
elmt.database, elmt.name, elmt.state);
+ skipped++;
continue;
}
@@ -5771,7 +5828,7 @@ int ndbcluster_find_all_files(THD *thd)
if (!(ndbtab= dict->getTable(elmt.name)))
{
- if (do_handle_table)
+ if (retries == 0)
sql_print_error("NDB: failed to setup table %s.%s, error: %d, %s",
elmt.database, elmt.name,
dict->getNdbError().code,
@@ -5840,9 +5897,9 @@ int ndbcluster_find_all_files(THD *thd)
pthread_mutex_unlock(&LOCK_open);
}
}
- while (unhandled && retries--);
+ while (unhandled && retries);
- DBUG_RETURN(0);
+ DBUG_RETURN(-(skipped + unhandled));
}
int ndbcluster_find_files(THD *thd,const char *db,const char *path,
@@ -7706,6 +7763,8 @@ pthread_handler_t ndb_util_thread_func(void *arg __attribute__((unused)))
pthread_cond_wait(&COND_server_started, &LOCK_server_started);
pthread_mutex_unlock(&LOCK_server_started);
+ ndbcluster_util_inited= 1;
+
/*
Wait for cluster to start
*/
@@ -7737,6 +7796,8 @@ pthread_handler_t ndb_util_thread_func(void *arg __attribute__((unused)))
}
#ifdef HAVE_NDB_BINLOG
+ if (ndb_extra_logging && ndb_binlog_running)
+ sql_print_information("NDB Binlog: Ndb tables initially read only.");
/* create tables needed by the replication */
ndbcluster_setup_binlog_table_shares(thd);
#else
@@ -7746,17 +7807,9 @@ pthread_handler_t ndb_util_thread_func(void *arg __attribute__((unused)))
ndbcluster_find_all_files(thd);
#endif
- ndbcluster_util_inited= 1;
-
-#ifdef HAVE_NDB_BINLOG
- /* Signal injector thread that all is setup */
- pthread_cond_signal(&injector_cond);
-#endif
-
set_timespec(abstime, 0);
for (;!abort_loop;)
{
-
pthread_mutex_lock(&LOCK_ndb_util_thread);
pthread_cond_timedwait(&COND_ndb_util_thread,
&LOCK_ndb_util_thread,
@@ -7774,7 +7827,7 @@ pthread_handler_t ndb_util_thread_func(void *arg __attribute__((unused)))
Check that the apply_status_share and schema_share has been created.
If not try to create it
*/
- if (!apply_status_share || !schema_share)
+ if (!ndb_binlog_tables_inited)
ndbcluster_setup_binlog_table_shares(thd);
#endif
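
For the ndbcluster_find_all_files() hunks above, a standalone sketch (illustrative types and names, not the NDB dictionary API) of the reworked retry loop: retries is now decremented at the top of each pass, tables still in the building state are counted as skipped, setup errors are logged only once the retries are exhausted, and the function returns -(skipped + unhandled) instead of 0 so the caller can tell that table setup is not yet complete.

// Standalone sketch of the reworked retry loop; Elmt and the names
// below are illustrative, not the NDB dictionary API.
#include <cstdio>
#include <vector>

struct Elmt { const char *name; bool building; bool ok; };

static int find_all_files(const std::vector<Elmt> &list)
{
  int unhandled, skipped, retries= 5;
  do
  {
    unhandled= 0;
    skipped= 0;
    retries--;
    for (const Elmt &elmt : list)
    {
      if (elmt.building)            // not ready yet: remember it was skipped
      {
        skipped++;
        continue;
      }
      if (!elmt.ok)                 // setup failed: retry, log only on last pass
      {
        if (retries == 0)
          std::printf("NDB: failed to setup table %s\n", elmt.name);
        unhandled++;
      }
    }
  }
  while (unhandled && retries);

  return -(skipped + unhandled);    // 0 only when every table was handled
}

int main()
{
  std::vector<Elmt> tables= { {"t1", false, true},
                              {"t2", true,  true},
                              {"t3", false, false} };
  std::printf("result: %d\n", find_all_files(tables));  // prints -2
  return 0;
}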