author      unknown <tomas@poseidon.(none)>             2004-09-14 12:47:34 +0000
committer   unknown <tomas@poseidon.(none)>             2004-09-14 12:47:34 +0000
commit      2ed29f93716a8fa7d20c8a524aec62ea419528a3 (patch)
tree        537321da689d9c852c4bf488041139efea7ed757
parent      714667b448f5754628c59c8dc405e8f12f7232f2 (diff)
download    mariadb-git-2ed29f93716a8fa7d20c8a524aec62ea419528a3.tar.gz
new method to set size of local table data
clearer configure description texts
changed Ndb_local_table_info to use create, destroy methods and hidden constructor/destructor
moved definition of Thd_ndb to .h file and changed seize/release to operate on Thd_ndb instead of Ndb objects
moved allocation/deletion of Ndb objects to Thd_ndb
ndb/include/ndbapi/NdbDictionary.hpp:
new method to set size of local table data
ndb/src/mgmsrv/ConfigInfo.cpp:
clearer configure description texts
ndb/src/ndbapi/DictCache.cpp:
changed Ndb_local_table_info to use create, destroy methods and hidden constructor/destructor
ndb/src/ndbapi/DictCache.hpp:
changed Ndb_local_table_info to use create, destroy methods and hidden constructor/destructor
ndb/src/ndbapi/NdbDictionary.cpp:
new method to set size of local table data
ndb/src/ndbapi/NdbDictionaryImpl.cpp:
new method to set size of local table data
ndb/src/ndbapi/NdbDictionaryImpl.hpp:
new method to set size of local table data
sql/ha_ndbcluster.cc:
new method to set size of local table data
moved allocation/deletion of Ndb objects to Thd_ndb
sql/ha_ndbcluster.h:
moved definition of Thd_ndb to .h file and changed seize/release to operate on Thd_ndb instead of Ndb objects
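
For orientation, here is a minimal sketch of the seize/release flow described in the sql/ha_ndbcluster notes above. The Ndb API and handler types are replaced with small hypothetical stubs; this illustrates the pattern only and is not the real NDB interface:

    #include <cstddef>

    // Hypothetical stand-ins for the NDB API used below; assumptions only.
    struct Dictionary {
      Dictionary() : local_sz(0) {}
      void set_local_table_data_size(unsigned sz) { local_sz= sz; }
      unsigned local_sz;
    };
    struct Ndb {
      Dictionary *getDictionary() { return &dict; }
      int init(int /*max_transactions*/) { return 0; }   // 0 means success
      Dictionary dict;
    };

    // Per-table data the handler asks the dictionary cache to reserve.
    struct Ndb_table_local_info {
      int no_uncommitted_rows_count;
      unsigned long last_count;
      unsigned long long records;
    };

    // Thd_ndb (now declared in the .h file) owns the per-thread Ndb object.
    class Thd_ndb {
    public:
      Thd_ndb() : ndb(new Ndb()), count(0), lock_count(0) {}
      ~Thd_ndb() { delete ndb; }
      Ndb *ndb;
      unsigned long count;
      unsigned lock_count;
    };

    // seize/release hand out Thd_ndb objects instead of bare Ndb objects.
    Thd_ndb *seize_thd_ndb(int max_transactions)
    {
      Thd_ndb *thd_ndb= new Thd_ndb();
      // Size the local table data before init, so the dictionary cache can
      // reserve room for an Ndb_table_local_info next to each cached table.
      thd_ndb->ndb->getDictionary()->
        set_local_table_data_size(sizeof(Ndb_table_local_info));
      if (thd_ndb->ndb->init(max_transactions) != 0)
      {
        delete thd_ndb;
        return NULL;
      }
      return thd_ndb;
    }

    void release_thd_ndb(Thd_ndb *thd_ndb) { delete thd_ndb; }

In the actual patch (see the ha_ndbcluster.cc hunks in the diff further down), check_ndb_connection() stores the seized Thd_ndb in thd->transaction.thd_ndb and takes m_ndb from it, and ndbcluster_close_connection() releases it again.
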
-rw-r--r-- | ndb/include/ndbapi/NdbDictionary.hpp |  1
-rw-r--r-- | ndb/src/mgmsrv/ConfigInfo.cpp        | 66
-rw-r--r-- | ndb/src/ndbapi/DictCache.cpp         | 26
-rw-r--r-- | ndb/src/ndbapi/DictCache.hpp         | 11
-rw-r--r-- | ndb/src/ndbapi/NdbDictionary.cpp     |  5
-rw-r--r-- | ndb/src/ndbapi/NdbDictionaryImpl.cpp |  7
-rw-r--r-- | ndb/src/ndbapi/NdbDictionaryImpl.hpp |  1
-rw-r--r-- | sql/ha_ndbcluster.cc                 | 72
-rw-r--r-- | sql/ha_ndbcluster.h                  | 17
9 files changed, 116 insertions, 90 deletions
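
The DictCache hunks in the diff below replace Ndb_local_table_info's public constructor/destructor with static create()/destroy() methods that put the object and its variable-size local data in a single allocation. A simplified, self-contained sketch of that pattern (shortened, hypothetical names; not the actual NDB sources):

    #include <cstddef>
    #include <cstdlib>
    #include <cstring>
    #include <new>

    struct TableImpl {};   // hypothetical stand-in for NdbTableImpl

    class LocalTableInfo {
    public:
      // Allocate the object plus sz bytes of trailing local data in one block.
      static LocalTableInfo *create(TableImpl *impl, unsigned sz)
      {
        std::size_t bytes= sizeof(LocalTableInfo) + sz;
        void *mem= std::malloc(bytes);
        if (mem == 0)
          return 0;
        std::memset(mem, 0, bytes);
        return new (mem) LocalTableInfo(impl);   // placement new
      }
      static void destroy(LocalTableInfo *info)
      {
        if (info)
        {
          info->~LocalTableInfo();
          std::free(info);
        }
      }

      TableImpl *m_table_impl;
      char m_local_data[1];   // grows into the extra bytes from create()

    private:
      // Hidden: callers must go through create()/destroy().
      LocalTableInfo(TableImpl *impl) : m_table_impl(impl) {}
      ~LocalTableInfo() {}
    };

In the patch itself this shows up as Ndb_local_table_info::create()/destroy() calls replacing new/delete in DictCache.cpp and NdbDictionaryImpl.cpp, with the local data size supplied via the new Dictionary::set_local_table_data_size() method.
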
diff --git a/ndb/include/ndbapi/NdbDictionary.hpp b/ndb/include/ndbapi/NdbDictionary.hpp index bb7e96bde1b..5c470c1d25f 100644 --- a/ndb/include/ndbapi/NdbDictionary.hpp +++ b/ndb/include/ndbapi/NdbDictionary.hpp @@ -1068,6 +1068,7 @@ public: const char * tableName); public: const Table * getTable(const char * name, void **data); + void set_local_table_data_size(unsigned sz); }; }; diff --git a/ndb/src/mgmsrv/ConfigInfo.cpp b/ndb/src/mgmsrv/ConfigInfo.cpp index 377bc7c435f..ea19bc76d0e 100644 --- a/ndb/src/mgmsrv/ConfigInfo.cpp +++ b/ndb/src/mgmsrv/ConfigInfo.cpp @@ -33,6 +33,10 @@ * Section names ****************************************************************************/ +#define DB_TOKEN_PRINT "ndbd(DB)" +#define MGM_TOKEN_PRINT "ndb_mgmd(MGM)" +#define API_TOKEN_PRINT "mysqld(API)" + #define DB_TOKEN "DB" #define MGM_TOKEN "MGM" #define API_TOKEN "API" @@ -327,7 +331,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { CFG_SYS_PRIMARY_MGM_NODE, "PrimaryMGMNode", "SYSTEM", - "Node id of Primary "MGM_TOKEN" node", + "Node id of Primary "MGM_TOKEN_PRINT" node", ConfigInfo::USED, false, ConfigInfo::INT, @@ -388,7 +392,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { CFG_NODE_ID, "Id", DB_TOKEN, - "Number identifying the database node ("DB_TOKEN")", + "Number identifying the database node ("DB_TOKEN_PRINT")", ConfigInfo::USED, false, ConfigInfo::INT, @@ -484,7 +488,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { CFG_DB_NO_INDEX_OPS, "MaxNoOfConcurrentIndexOperations", DB_TOKEN, - "Total number of index operations that can execute simultaneously on one "DB_TOKEN" node", + "Total number of index operations that can execute simultaneously on one "DB_TOKEN_PRINT" node", ConfigInfo::USED, false, ConfigInfo::INT, @@ -509,7 +513,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { CFG_DB_NO_TRIGGER_OPS, "MaxNoOfFiredTriggers", DB_TOKEN, - "Total number of triggers that can fire simultaneously in one "DB_TOKEN" node", + "Total number of triggers that can fire simultaneously in one "DB_TOKEN_PRINT" node", ConfigInfo::USED, false, ConfigInfo::INT, @@ -568,7 +572,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { CFG_DB_STOP_ON_ERROR, "StopOnError", DB_TOKEN, - "If set to N, "DB_TOKEN" automatically restarts/recovers in case of node failure", + "If set to N, "DB_TOKEN_PRINT" automatically restarts/recovers in case of node failure", ConfigInfo::USED, true, ConfigInfo::BOOL, @@ -640,7 +644,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { CFG_DB_NO_TRANSACTIONS, "MaxNoOfConcurrentTransactions", DB_TOKEN, - "Max number of transaction executing concurrently on the "DB_TOKEN" node", + "Max number of transaction executing concurrently on the "DB_TOKEN_PRINT" node", ConfigInfo::USED, false, ConfigInfo::INT, @@ -652,7 +656,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { CFG_DB_NO_SCANS, "MaxNoOfConcurrentScans", DB_TOKEN, - "Max number of scans executing concurrently on the "DB_TOKEN" node", + "Max number of scans executing concurrently on the "DB_TOKEN_PRINT" node", ConfigInfo::USED, false, ConfigInfo::INT, @@ -664,7 +668,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { CFG_DB_TRANS_BUFFER_MEM, "TransactionBufferMemory", DB_TOKEN, - "Dynamic buffer space (in bytes) for key and attribute data allocated for each "DB_TOKEN" node", + "Dynamic buffer space (in bytes) for key and attribute data allocated for each "DB_TOKEN_PRINT" node", ConfigInfo::USED, false, ConfigInfo::INT, @@ -676,7 +680,7 @@ const 
ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { CFG_DB_INDEX_MEM, "IndexMemory", DB_TOKEN, - "Number bytes on each "DB_TOKEN" node allocated for storing indexes", + "Number bytes on each "DB_TOKEN_PRINT" node allocated for storing indexes", ConfigInfo::USED, false, ConfigInfo::INT64, @@ -688,7 +692,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { CFG_DB_DATA_MEM, "DataMemory", DB_TOKEN, - "Number bytes on each "DB_TOKEN" node allocated for storing data", + "Number bytes on each "DB_TOKEN_PRINT" node allocated for storing data", ConfigInfo::USED, false, ConfigInfo::INT64, @@ -700,7 +704,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { CFG_DB_UNDO_INDEX_BUFFER, "UndoIndexBuffer", DB_TOKEN, - "Number bytes on each "DB_TOKEN" node allocated for writing UNDO logs for index part", + "Number bytes on each "DB_TOKEN_PRINT" node allocated for writing UNDO logs for index part", ConfigInfo::USED, false, ConfigInfo::INT, @@ -712,7 +716,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { CFG_DB_UNDO_DATA_BUFFER, "UndoDataBuffer", DB_TOKEN, - "Number bytes on each "DB_TOKEN" node allocated for writing UNDO logs for data part", + "Number bytes on each "DB_TOKEN_PRINT" node allocated for writing UNDO logs for data part", ConfigInfo::USED, false, ConfigInfo::INT, @@ -724,7 +728,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { CFG_DB_REDO_BUFFER, "RedoBuffer", DB_TOKEN, - "Number bytes on each "DB_TOKEN" node allocated for writing REDO logs", + "Number bytes on each "DB_TOKEN_PRINT" node allocated for writing REDO logs", ConfigInfo::USED, false, ConfigInfo::INT, @@ -736,7 +740,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { CFG_DB_LONG_SIGNAL_BUFFER, "LongMessageBuffer", DB_TOKEN, - "Number bytes on each "DB_TOKEN" node allocated for internal long messages", + "Number bytes on each "DB_TOKEN_PRINT" node allocated for internal long messages", ConfigInfo::USED, false, ConfigInfo::INT, @@ -784,7 +788,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { CFG_DB_HEARTBEAT_INTERVAL, "HeartbeatIntervalDbDb", DB_TOKEN, - "Time between "DB_TOKEN"-"DB_TOKEN" heartbeats. "DB_TOKEN" considered dead after 3 missed HBs", + "Time between "DB_TOKEN_PRINT"-"DB_TOKEN_PRINT" heartbeats. "DB_TOKEN_PRINT" considered dead after 3 missed HBs", ConfigInfo::USED, true, ConfigInfo::INT, @@ -796,7 +800,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { CFG_DB_API_HEARTBEAT_INTERVAL, "HeartbeatIntervalDbApi", DB_TOKEN, - "Time between "API_TOKEN"-"DB_TOKEN" heartbeats. "API_TOKEN" connection closed after 3 missed HBs", + "Time between "API_TOKEN_PRINT"-"DB_TOKEN_PRINT" heartbeats. 
"API_TOKEN_PRINT" connection closed after 3 missed HBs", ConfigInfo::USED, true, ConfigInfo::INT, @@ -832,7 +836,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { CFG_DB_NO_REDOLOG_FILES, "NoOfFragmentLogFiles", DB_TOKEN, - "No of 16 Mbyte Redo log files in each of 4 file sets belonging to "DB_TOKEN" node", + "No of 16 Mbyte Redo log files in each of 4 file sets belonging to "DB_TOKEN_PRINT" node", ConfigInfo::USED, false, ConfigInfo::INT, @@ -844,7 +848,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { KEY_INTERNAL, "MaxNoOfOpenFiles", DB_TOKEN, - "Max number of files open per "DB_TOKEN" node.(One thread is created per file)", + "Max number of files open per "DB_TOKEN_PRINT" node.(One thread is created per file)", ConfigInfo::USED, false, ConfigInfo::INT, @@ -998,7 +1002,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { CFG_DB_FILESYSTEM_PATH, "FileSystemPath", DB_TOKEN, - "Path to directory where the "DB_TOKEN" node stores its data (directory must exist)", + "Path to directory where the "DB_TOKEN_PRINT" node stores its data (directory must exist)", ConfigInfo::USED, false, ConfigInfo::STRING, @@ -1288,7 +1292,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { CFG_NODE_ID, "Id", API_TOKEN, - "Number identifying application node ("API_TOKEN")", + "Number identifying application node ("API_TOKEN_PRINT")", ConfigInfo::USED, false, ConfigInfo::INT, @@ -1311,7 +1315,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { CFG_NODE_ARBIT_RANK, "ArbitrationRank", API_TOKEN, - "If 0, then "API_TOKEN" is not arbitrator. Kernel selects arbitrators in order 1, 2", + "If 0, then "API_TOKEN_PRINT" is not arbitrator. Kernel selects arbitrators in order 1, 2", ConfigInfo::USED, false, ConfigInfo::INT, @@ -1419,7 +1423,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { CFG_NODE_ID, "Id", MGM_TOKEN, - "Number identifying the management server node ("MGM_TOKEN")", + "Number identifying the management server node ("MGM_TOKEN_PRINT")", ConfigInfo::USED, false, ConfigInfo::INT, @@ -1489,7 +1493,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { CFG_NODE_ARBIT_RANK, "ArbitrationRank", MGM_TOKEN, - "If 0, then "MGM_TOKEN" is not arbitrator. Kernel selects arbitrators in order 1, 2", + "If 0, then "MGM_TOKEN_PRINT" is not arbitrator. 
Kernel selects arbitrators in order 1, 2", ConfigInfo::USED, false, ConfigInfo::INT, @@ -1550,7 +1554,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { CFG_CONNECTION_NODE_1, "NodeId1", "TCP", - "Id of node ("DB_TOKEN", "API_TOKEN" or "MGM_TOKEN") on one side of the connection", + "Id of node ("DB_TOKEN_PRINT", "API_TOKEN_PRINT" or "MGM_TOKEN_PRINT") on one side of the connection", ConfigInfo::USED, false, ConfigInfo::STRING, @@ -1561,7 +1565,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { CFG_CONNECTION_NODE_2, "NodeId2", "TCP", - "Id of node ("DB_TOKEN", "API_TOKEN" or "MGM_TOKEN") on one side of the connection", + "Id of node ("DB_TOKEN_PRINT", "API_TOKEN_PRINT" or "MGM_TOKEN_PRINT") on one side of the connection", ConfigInfo::USED, false, ConfigInfo::STRING, @@ -1681,7 +1685,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { CFG_CONNECTION_NODE_1, "NodeId1", "SHM", - "Id of node ("DB_TOKEN", "API_TOKEN" or "MGM_TOKEN") on one side of the connection", + "Id of node ("DB_TOKEN_PRINT", "API_TOKEN_PRINT" or "MGM_TOKEN_PRINT") on one side of the connection", ConfigInfo::USED, false, ConfigInfo::STRING, @@ -1704,7 +1708,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { CFG_CONNECTION_NODE_2, "NodeId2", "SHM", - "Id of node ("DB_TOKEN", "API_TOKEN" or "MGM_TOKEN") on one side of the connection", + "Id of node ("DB_TOKEN_PRINT", "API_TOKEN_PRINT" or "MGM_TOKEN_PRINT") on one side of the connection", ConfigInfo::USED, false, ConfigInfo::STRING, @@ -1801,7 +1805,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { CFG_CONNECTION_NODE_1, "NodeId1", "SCI", - "Id of node ("DB_TOKEN", "API_TOKEN" or "MGM_TOKEN") on one side of the connection", + "Id of node ("DB_TOKEN_PRINT", "API_TOKEN_PRINT" or "MGM_TOKEN_PRINT") on one side of the connection", ConfigInfo::USED, false, ConfigInfo::INT, @@ -1813,7 +1817,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { CFG_CONNECTION_NODE_2, "NodeId2", "SCI", - "Id of node ("DB_TOKEN", "API_TOKEN" or "MGM_TOKEN") on one side of the connection", + "Id of node ("DB_TOKEN_PRINT", "API_TOKEN_PRINT" or "MGM_TOKEN_PRINT") on one side of the connection", ConfigInfo::USED, false, ConfigInfo::INT, @@ -1956,7 +1960,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { CFG_CONNECTION_NODE_1, "NodeId1", "OSE", - "Id of node ("DB_TOKEN", "API_TOKEN" or "MGM_TOKEN") on one side of the connection", + "Id of node ("DB_TOKEN_PRINT", "API_TOKEN_PRINT" or "MGM_TOKEN_PRINT") on one side of the connection", ConfigInfo::USED, false, ConfigInfo::INT, @@ -1968,7 +1972,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { CFG_CONNECTION_NODE_2, "NodeId2", "OSE", - "Id of node ("DB_TOKEN", "API_TOKEN" or "MGM_TOKEN") on one side of the connection", + "Id of node ("DB_TOKEN_PRINT", "API_TOKEN_PRINT" or "MGM_TOKEN_PRINT") on one side of the connection", ConfigInfo::USED, false, ConfigInfo::INT, diff --git a/ndb/src/ndbapi/DictCache.cpp b/ndb/src/ndbapi/DictCache.cpp index 0c778d7222e..12300ce216f 100644 --- a/ndb/src/ndbapi/DictCache.cpp +++ b/ndb/src/ndbapi/DictCache.cpp @@ -21,19 +21,29 @@ #include <NdbCondition.h> #include <NdbSleep.h> -Ndb_local_table_info::Ndb_local_table_info(NdbTableImpl *table_impl, Uint32 sz) +Ndb_local_table_info * +Ndb_local_table_info::create(NdbTableImpl *table_impl, Uint32 sz) +{ + void *data= malloc(sizeof(NdbTableImpl)+sz-1); + if (data == 0) + return 0; + memset(data,0,sizeof(NdbTableImpl)+sz-1); + new (data) Ndb_local_table_info(table_impl); + return 
(Ndb_local_table_info *) data; +} + +void Ndb_local_table_info::destroy(Ndb_local_table_info *info) +{ + free((void *)info); +} + +Ndb_local_table_info::Ndb_local_table_info(NdbTableImpl *table_impl) { m_table_impl= table_impl; - if (sz) - m_local_data= malloc(sz); - else - m_local_data= 0; } Ndb_local_table_info::~Ndb_local_table_info() { - if (m_local_data) - free(m_local_data); } LocalDictCache::LocalDictCache(){ @@ -61,7 +71,7 @@ void LocalDictCache::drop(const char * name){ Ndb_local_table_info *info= m_tableHash.deleteKey(name, strlen(name)); DBUG_ASSERT(info != 0); - delete info; + Ndb_local_table_info::destroy(info); } /***************************************************************** diff --git a/ndb/src/ndbapi/DictCache.hpp b/ndb/src/ndbapi/DictCache.hpp index f94ad7a6fa9..0dc853306fa 100644 --- a/ndb/src/ndbapi/DictCache.hpp +++ b/ndb/src/ndbapi/DictCache.hpp @@ -29,12 +29,13 @@ class Ndb_local_table_info { public: - Ndb_local_table_info(NdbTableImpl *table_impl, Uint32 sz=0); - ~Ndb_local_table_info(); + static Ndb_local_table_info *create(NdbTableImpl *table_impl, Uint32 sz=0); + static void destroy(Ndb_local_table_info *); NdbTableImpl *m_table_impl; - Uint64 m_first_tuple_id; - Uint64 m_last_tuple_id; - void *m_local_data; + char m_local_data[1]; +private: + Ndb_local_table_info(NdbTableImpl *table_impl); + ~Ndb_local_table_info(); }; /** diff --git a/ndb/src/ndbapi/NdbDictionary.cpp b/ndb/src/ndbapi/NdbDictionary.cpp index a92126abae7..8000b53d3be 100644 --- a/ndb/src/ndbapi/NdbDictionary.cpp +++ b/ndb/src/ndbapi/NdbDictionary.cpp @@ -688,6 +688,11 @@ NdbDictionary::Dictionary::getTable(const char * name, void **data){ return 0; } +void NdbDictionary::Dictionary::set_local_table_data_size(unsigned sz) +{ + m_impl.m_local_table_data_size= sz; +} + const NdbDictionary::Table * NdbDictionary::Dictionary::getTable(const char * name){ return getTable(name, 0); diff --git a/ndb/src/ndbapi/NdbDictionaryImpl.cpp b/ndb/src/ndbapi/NdbDictionaryImpl.cpp index 6b36b776f14..7be43c46a9b 100644 --- a/ndb/src/ndbapi/NdbDictionaryImpl.cpp +++ b/ndb/src/ndbapi/NdbDictionaryImpl.cpp @@ -589,6 +589,7 @@ NdbDictionaryImpl::NdbDictionaryImpl(Ndb &ndb, m_ndb(ndb) { m_globalHash = 0; + m_local_table_data_size= 0; } static int f_dictionary_count = 0; @@ -600,7 +601,7 @@ NdbDictionaryImpl::~NdbDictionaryImpl() while(curr != 0){ m_globalHash->lock(); m_globalHash->release(curr->theData->m_table_impl); - delete curr->theData; + Ndb_local_table_info::destroy(curr->theData); m_globalHash->unlock(); curr = m_localHash.m_tableHash.getNext(curr); @@ -641,9 +642,7 @@ NdbDictionaryImpl::fetchGlobalTableImpl(const char * internalTableName) } } - Ndb_local_table_info *info= new Ndb_local_table_info(impl, 32); - info->m_first_tuple_id= ~0; - info->m_last_tuple_id= ~0; + Ndb_local_table_info *info= Ndb_local_table_info::create(impl, m_local_table_data_size); m_localHash.put(internalTableName, info); diff --git a/ndb/src/ndbapi/NdbDictionaryImpl.hpp b/ndb/src/ndbapi/NdbDictionaryImpl.hpp index cd0463f7126..da5e7e45c36 100644 --- a/ndb/src/ndbapi/NdbDictionaryImpl.hpp +++ b/ndb/src/ndbapi/NdbDictionaryImpl.hpp @@ -400,6 +400,7 @@ public: const NdbError & getNdbError() const; NdbError m_error; + Uint32 m_local_table_data_size; LocalDictCache m_localHash; GlobalDictCache * m_globalHash; diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 48543847649..8b6f2d5cfef 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -142,24 +142,17 @@ static int ndb_to_mysql_error(const NdbError *err) Place 
holder for ha_ndbcluster thread specific data */ -class Thd_ndb { -public: - Thd_ndb(); - ~Thd_ndb(); - Ndb *ndb; - ulong count; - uint lock_count; -}; - Thd_ndb::Thd_ndb() { - ndb= 0; + ndb= new Ndb(g_ndb_cluster_connection, ""); lock_count= 0; count= 0; } Thd_ndb::~Thd_ndb() { + if (ndb) + delete ndb; } /* @@ -168,7 +161,7 @@ Thd_ndb::~Thd_ndb() struct Ndb_table_local_info { int no_uncommitted_rows_count; - ulong transaction_count; + ulong last_count; ha_rows records; }; @@ -195,9 +188,9 @@ void ha_ndbcluster::no_uncommitted_rows_init(THD *thd) DBUG_ENTER("ha_ndbcluster::no_uncommitted_rows_init"); struct Ndb_table_local_info *info= (struct Ndb_table_local_info *)m_table_info; Thd_ndb *thd_ndb= (Thd_ndb *)thd->transaction.thd_ndb; - if (info->transaction_count != thd_ndb->count) + if (info->last_count != thd_ndb->count) { - info->transaction_count = thd_ndb->count; + info->last_count = thd_ndb->count; info->no_uncommitted_rows_count= 0; info->records= ~(ha_rows)0; DBUG_PRINT("info", ("id=%d, no_uncommitted_rows_count=%d", @@ -3346,10 +3339,7 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg): m_tabname[0]= '\0'; m_dbname[0]= '\0'; - // TODO Adjust number of records and other parameters for proper - // selection of scan/pk access - // records= 100; - records= 0; + records= ~(ha_rows)0; // uninitialized block_size= 1024; for (i= 0; i < MAX_KEY; i++) @@ -3444,41 +3434,44 @@ int ha_ndbcluster::close(void) } -Ndb* ha_ndbcluster::seize_ndb() +Thd_ndb* ha_ndbcluster::seize_thd_ndb() { - Ndb* ndb; - DBUG_ENTER("seize_ndb"); + Thd_ndb *thd_ndb; + DBUG_ENTER("seize_thd_ndb"); #ifdef USE_NDB_POOL // Seize from pool ndb= Ndb::seize(); + xxxxxxxxxxxxxx error #else - ndb= new Ndb(g_ndb_cluster_connection, ""); + thd_ndb= new Thd_ndb(); #endif - if (ndb->init(max_transactions) != 0) + thd_ndb->ndb->getDictionary()->set_local_table_data_size(sizeof(Ndb_table_local_info)); + if (thd_ndb->ndb->init(max_transactions) != 0) { - ERR_PRINT(ndb->getNdbError()); + ERR_PRINT(thd_ndb->ndb->getNdbError()); /* TODO Alt.1 If init fails because to many allocated Ndb wait on condition for a Ndb object to be released. 
Alt.2 Seize/release from pool, wait until next release */ - delete ndb; - ndb= NULL; + delete thd_ndb; + thd_ndb= NULL; } - DBUG_RETURN(ndb); + DBUG_RETURN(thd_ndb); } -void ha_ndbcluster::release_ndb(Ndb* ndb) +void ha_ndbcluster::release_thd_ndb(Thd_ndb* thd_ndb) { - DBUG_ENTER("release_ndb"); + DBUG_ENTER("release_thd_ndb"); #ifdef USE_NDB_POOL // Release to pool Ndb::release(ndb); + xxxxxxxxxxxx error #else - delete ndb; + delete thd_ndb; #endif DBUG_VOID_RETURN; } @@ -3497,19 +3490,18 @@ void ha_ndbcluster::release_ndb(Ndb* ndb) int ha_ndbcluster::check_ndb_connection() { - THD* thd= current_thd; - Ndb* ndb; + THD *thd= current_thd; + Thd_ndb *thd_ndb= (Thd_ndb*)thd->transaction.thd_ndb; DBUG_ENTER("check_ndb_connection"); - if (!thd->transaction.thd_ndb) + if (!thd_ndb) { - ndb= seize_ndb(); - if (!ndb) + thd_ndb= seize_thd_ndb(); + if (!thd_ndb) DBUG_RETURN(2); - thd->transaction.thd_ndb= new Thd_ndb(); - ((Thd_ndb *)thd->transaction.thd_ndb)->ndb= ndb; + thd->transaction.thd_ndb= thd_ndb; } - m_ndb= ((Thd_ndb*)thd->transaction.thd_ndb)->ndb; + m_ndb= thd_ndb->ndb; m_ndb->setDatabaseName(m_dbname); DBUG_RETURN(0); } @@ -3517,12 +3509,10 @@ int ha_ndbcluster::check_ndb_connection() void ndbcluster_close_connection(THD *thd) { Thd_ndb *thd_ndb= (Thd_ndb*)thd->transaction.thd_ndb; - Ndb* ndb; DBUG_ENTER("ndbcluster_close_connection"); if (thd_ndb) { - ha_ndbcluster::release_ndb(thd_ndb->ndb); - delete thd_ndb; + ha_ndbcluster::release_thd_ndb(thd_ndb); thd->transaction.thd_ndb= NULL; } DBUG_VOID_RETURN; @@ -3543,6 +3533,7 @@ int ndbcluster_discover(const char *dbname, const char *name, DBUG_PRINT("enter", ("db: %s, name: %s", dbname, name)); Ndb ndb(g_ndb_cluster_connection, dbname); + ndb.getDictionary()->set_local_table_data_size(sizeof(Ndb_table_local_info)); if (ndb.init()) ERR_RETURN(ndb.getNdbError()); @@ -3633,6 +3624,7 @@ bool ndbcluster_init() // Create a Ndb object to open the connection to NDB g_ndb= new Ndb(g_ndb_cluster_connection, "sys"); + g_ndb->getDictionary()->set_local_table_data_size(sizeof(Ndb_table_local_info)); if (g_ndb->init() != 0) { ERR_PRINT (g_ndb->getNdbError()); diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h index 44a6873f4e5..f223ada55b1 100644 --- a/sql/ha_ndbcluster.h +++ b/sql/ha_ndbcluster.h @@ -63,6 +63,19 @@ typedef struct st_ndbcluster_share { uint table_name_length,use_count; } NDB_SHARE; +/* + Place holder for ha_ndbcluster thread specific data +*/ + +class Thd_ndb { + public: + Thd_ndb(); + ~Thd_ndb(); + Ndb *ndb; + ulong count; + uint lock_count; +}; + class ha_ndbcluster: public handler { public: @@ -147,8 +160,8 @@ class ha_ndbcluster: public handler void start_bulk_insert(ha_rows rows); int end_bulk_insert(); - static Ndb* seize_ndb(); - static void release_ndb(Ndb* ndb); + static Thd_ndb* seize_thd_ndb(); + static void release_thd_ndb(Thd_ndb* thd_ndb); uint8 table_cache_type() { return HA_CACHE_TBL_NOCACHE; } private: |