author    unknown <serg@serg.mylan>  2005-03-23 22:40:12 +0100
committer unknown <serg@serg.mylan>  2005-03-23 22:40:12 +0100
commit    a004c5101586d5c522a52c7cb84933bbd8037a4a (patch)
tree      4ef36869a8d44ec01978ecfe5faaa3af0d315fe5 /sql
parent    c5b5385c655932c30f9cb87a7b85d826e2dfac4e (diff)
parent    7c5e5b2cc200a473a3e0cd0c5b6b9e42ab59f8c1 (diff)
Merge
Diffstat (limited to 'sql')
-rw-r--r--  sql/ha_innodb.cc      | 18
-rw-r--r--  sql/ha_ndbcluster.cc  | 59
-rw-r--r--  sql/ha_ndbcluster.h   |  3
-rw-r--r--  sql/sql_class.cc      |  1
4 files changed, 57 insertions, 24 deletions
diff --git a/sql/ha_innodb.cc b/sql/ha_innodb.cc
index 349a69e5a80..e29560f15a5 100644
--- a/sql/ha_innodb.cc
+++ b/sql/ha_innodb.cc
@@ -2453,7 +2453,14 @@ ha_innobase::store_key_val_for_row(
(byte*) (record
+ (ulint)get_field_offset(table, field)),
lenlen);
+
+ /* In a column prefix index, we may need to truncate
+ the stored value: */
+ if (len > key_part->length) {
+ len = key_part->length;
+ }
+
/* The length in a key value is always stored in 2
bytes */
@@ -2490,6 +2497,11 @@ ha_innobase::store_key_val_for_row(
ut_a(get_field_offset(table, field)
== key_part->offset);
+
+ /* All indexes on BLOB and TEXT are column prefix
+ indexes, and we may need to truncate the data to be
+ stored in the key value: */
+
if (blob_len > key_part->length) {
blob_len = key_part->length;
}
@@ -2508,11 +2520,17 @@ ha_innobase::store_key_val_for_row(
buff += key_part->length;
} else {
+ /* Here we handle all other data types except the
+ true VARCHAR, BLOB and TEXT. Note that the column
+ value we store may be also in a column prefix
+ index. */
+
if (is_null) {
buff += key_part->length;
continue;
}
+
memcpy(buff, record + key_part->offset,
key_part->length);
buff += key_part->length;
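
The hunks above add prefix truncation when ha_innobase::store_key_val_for_row() copies a column into a key buffer: if the column value is longer than the indexed prefix (key_part->length), only the prefix is stored, the 2-byte length written ahead of the data reflects the truncated size, and the write position still advances by the full fixed slot width. A minimal standalone sketch of that pattern, assuming a little-endian 2-byte length header and a fixed-width key slot (the helper name and the encoding are illustrative, not InnoDB's actual routines):

    #include <algorithm>
    #include <cstddef>
    #include <cstring>

    /* Append one column value to a key buffer, truncating it to the indexed
       prefix length and writing a 2-byte length header first.  Returns the
       new write position.  Illustrative only: the real
       ha_innobase::store_key_val_for_row() also handles NULLs, charsets,
       true VARCHARs and BLOB/TEXT columns. */
    static unsigned char *store_prefix(unsigned char *buff,
                                       const unsigned char *value,
                                       std::size_t value_len,
                                       std::size_t prefix_len /* key_part->length */)
    {
      /* In a column prefix index we may need to truncate the stored value. */
      std::size_t len = std::min(value_len, prefix_len);

      /* The length in a key value is always stored in 2 bytes. */
      buff[0] = static_cast<unsigned char>(len & 0xFF);
      buff[1] = static_cast<unsigned char>((len >> 8) & 0xFF);
      buff += 2;

      std::memcpy(buff, value, len);

      /* The key slot is always prefix_len bytes wide, so advance past it
         even when the stored value was shorter. */
      return buff + prefix_len;
    }
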
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 68523bfb41f..edeb702145d 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -308,7 +308,7 @@ Ndb *ha_ndbcluster::get_ndb()
* manage uncommitted inserts/deletes during transaction to get records correct
*/
-struct Ndb_table_local_info {
+struct Ndb_local_table_statistics {
int no_uncommitted_rows_count;
ulong last_count;
ha_rows records;
@@ -329,7 +329,8 @@ void ha_ndbcluster::records_update()
if (m_ha_not_exact_count)
return;
DBUG_ENTER("ha_ndbcluster::records_update");
- struct Ndb_table_local_info *info= (struct Ndb_table_local_info *)m_table_info;
+ struct Ndb_local_table_statistics *info=
+ (struct Ndb_local_table_statistics *)m_table_info;
DBUG_PRINT("info", ("id=%d, no_uncommitted_rows_count=%d",
((const NDBTAB *)m_table)->getTableId(),
info->no_uncommitted_rows_count));
@@ -366,7 +367,8 @@ void ha_ndbcluster::no_uncommitted_rows_init(THD *thd)
if (m_ha_not_exact_count)
return;
DBUG_ENTER("ha_ndbcluster::no_uncommitted_rows_init");
- struct Ndb_table_local_info *info= (struct Ndb_table_local_info *)m_table_info;
+ struct Ndb_local_table_statistics *info=
+ (struct Ndb_local_table_statistics *)m_table_info;
Thd_ndb *thd_ndb= get_thd_ndb(thd);
if (info->last_count != thd_ndb->count)
{
@@ -385,8 +387,8 @@ void ha_ndbcluster::no_uncommitted_rows_update(int c)
if (m_ha_not_exact_count)
return;
DBUG_ENTER("ha_ndbcluster::no_uncommitted_rows_update");
- struct Ndb_table_local_info *info=
- (struct Ndb_table_local_info *)m_table_info;
+ struct Ndb_local_table_statistics *info=
+ (struct Ndb_local_table_statistics *)m_table_info;
info->no_uncommitted_rows_count+= c;
DBUG_PRINT("info", ("id=%d, no_uncommitted_rows_count=%d",
((const NDBTAB *)m_table)->getTableId(),
@@ -838,10 +840,8 @@ bool ha_ndbcluster::uses_blob_value(bool all_fields)
Get metadata for this table from NDB
IMPLEMENTATION
- - save the NdbDictionary::Table for easy access
- check that frm-file on disk is equal to frm-file
of table accessed in NDB
- - build a list of the indexes for the table
*/
int ha_ndbcluster::get_metadata(const char *path)
@@ -905,11 +905,12 @@ int ha_ndbcluster::get_metadata(const char *path)
if (error)
DBUG_RETURN(error);
-
- m_table= NULL;
- m_table_info= NULL;
- DBUG_RETURN(build_index_list(table, ILBP_OPEN));
+ m_tableVersion= tab->getObjectVersion();
+ m_table= (void *)tab;
+ m_table_info= NULL; // Set in external lock
+
+ DBUG_RETURN(build_index_list(ndb, table, ILBP_OPEN));
}
static int fix_unique_index_attr_order(NDB_INDEX_DATA &data,
@@ -937,7 +938,7 @@ static int fix_unique_index_attr_order(NDB_INDEX_DATA &data,
#endif
for (unsigned j= 0; j < sz; j++)
{
- const NdbDictionary::Column *c= index->getColumn(j);
+ const NDBCOL *c= index->getColumn(j);
if (strncmp(field_name, c->getName(), name_sz) == 0)
{
data.unique_index_attrid_map[i]= j;
@@ -949,7 +950,7 @@ static int fix_unique_index_attr_order(NDB_INDEX_DATA &data,
DBUG_RETURN(0);
}
-int ha_ndbcluster::build_index_list(TABLE *tab, enum ILBP phase)
+int ha_ndbcluster::build_index_list(Ndb *ndb, TABLE *tab, enum ILBP phase)
{
uint i;
int error= 0;
@@ -958,8 +959,7 @@ int ha_ndbcluster::build_index_list(TABLE *tab, enum ILBP phase)
static const char* unique_suffix= "$unique";
KEY* key_info= tab->key_info;
const char **key_name= tab->s->keynames.type_names;
- Ndb *ndb= get_ndb();
- NdbDictionary::Dictionary *dict= ndb->getDictionary();
+ NDBDICT *dict= ndb->getDictionary();
DBUG_ENTER("ha_ndbcluster::build_index_list");
// Save information about all known indexes
@@ -3107,6 +3107,10 @@ THR_LOCK_DATA **ha_ndbcluster::store_lock(THD *thd,
When a table lock is held one transaction will be started which holds
the table lock and for each statement a hupp transaction will be started
+ If we are locking the table then:
+ - save the NdbDictionary::Table for easy access
+ - save reference to table statistics
+ - refresh list of the indexes for the table if needed (if altered)
*/
int ha_ndbcluster::external_lock(THD *thd, int lock_type)
@@ -3212,7 +3216,15 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
if (!(tab= dict->getTable(m_tabname, &tab_info)))
ERR_RETURN(dict->getNdbError());
DBUG_PRINT("info", ("Table schema version: %d", tab->getObjectVersion()));
- m_table= (void *)tab;
+ if (m_table != (void *)tab || m_tableVersion != tab->getObjectVersion())
+ {
+ /*
+ The table has been altered, refresh the index list
+ */
+ build_index_list(ndb, table, ILBP_OPEN);
+ m_table= (void *)tab;
+ m_tableVersion = tab->getObjectVersion();
+ }
m_table_info= tab_info;
}
no_uncommitted_rows_init(thd);
@@ -3873,7 +3885,7 @@ int ha_ndbcluster::create(const char *name,
m_dbname, m_tabname));
// Create secondary indexes
- my_errno= build_index_list(form, ILBP_CREATE);
+ my_errno= build_index_list(ndb, form, ILBP_CREATE);
if (!my_errno)
my_errno= write_ndb_file();
@@ -4092,6 +4104,7 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg):
m_active_trans(NULL),
m_active_cursor(NULL),
m_table(NULL),
+ m_tableVersion(-1),
m_table_info(NULL),
m_table_flags(HA_REC_NOT_IN_SEQ |
HA_NULL_IN_KEY |
@@ -4240,9 +4253,9 @@ Thd_ndb* ha_ndbcluster::seize_thd_ndb()
DBUG_ENTER("seize_thd_ndb");
thd_ndb= new Thd_ndb();
- thd_ndb->ndb->getDictionary()->set_local_table_data_size(sizeof(Ndb_table_local_info));
-
-
+ thd_ndb->ndb->getDictionary()->set_local_table_data_size(
+ sizeof(Ndb_local_table_statistics)
+ );
if (thd_ndb->ndb->init(max_transactions) != 0)
{
ERR_PRINT(thd_ndb->ndb->getNdbError());
@@ -4332,7 +4345,7 @@ int ndbcluster_discover(THD* thd, const char *db, const char *name,
ndb->setDatabaseName(db);
NDBDICT* dict= ndb->getDictionary();
- dict->set_local_table_data_size(sizeof(Ndb_table_local_info));
+ dict->set_local_table_data_size(sizeof(Ndb_local_table_statistics));
dict->invalidateTable(name);
if (!(tab= dict->getTable(name)))
{
@@ -4378,7 +4391,7 @@ int ndbcluster_table_exists(THD* thd, const char *db, const char *name)
ndb->setDatabaseName(db);
NDBDICT* dict= ndb->getDictionary();
- dict->set_local_table_data_size(sizeof(Ndb_table_local_info));
+ dict->set_local_table_data_size(sizeof(Ndb_local_table_statistics));
dict->invalidateTable(name);
if (!(tab= dict->getTable(name)))
{
@@ -4597,7 +4610,7 @@ ndbcluster_init()
DBUG_PRINT("error", ("failed to create global ndb object"));
goto ndbcluster_init_error;
}
- g_ndb->getDictionary()->set_local_table_data_size(sizeof(Ndb_table_local_info));
+ g_ndb->getDictionary()->set_local_table_data_size(sizeof(Ndb_local_table_statistics));
if (g_ndb->init() != 0)
{
ERR_PRINT (g_ndb->getNdbError());
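
The external_lock() hunk above caches the table's dictionary object together with its object version (the new m_tableVersion member declared in ha_ndbcluster.h below), and rebuilds the handler's index list only when the dictionary reports a different pointer or version, i.e. when the table was altered since it was last cached. A rough sketch of that refresh-on-alter pattern, with placeholder class and member names rather than the real NDB handler API:

    /* Placeholder sketch of the version check performed in external_lock();
       HandlerSketch and its members are illustrative, not the NDB handler. */
    class HandlerSketch {
      const void *m_table = nullptr;  /* cached dictionary object            */
      int m_tableVersion = -1;        /* cached schema version, -1 when unset */

      void build_index_list()         /* stands in for build_index_list() */
      { /* rebuild the cached index metadata here */ }

    public:
      void on_lock(const void *dict_table, int dict_version)
      {
        if (m_table != dict_table || m_tableVersion != dict_version)
        {
          /* The table has been altered (or was never cached): refresh the
             index list, then re-cache the object and its version. */
          build_index_list();
          m_table = dict_table;
          m_tableVersion = dict_version;
        }
      }
    };
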
diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h
index 82925ad5d69..4574ddc3562 100644
--- a/sql/ha_ndbcluster.h
+++ b/sql/ha_ndbcluster.h
@@ -509,7 +509,7 @@ private:
int create_unique_index(const char *name, KEY *key_info);
int initialize_autoincrement(const void *table);
enum ILBP {ILBP_CREATE = 0, ILBP_OPEN = 1}; // Index List Build Phase
- int build_index_list(TABLE *tab, enum ILBP phase);
+ int build_index_list(Ndb *ndb, TABLE *tab, enum ILBP phase);
int get_metadata(const char* path);
void release_metadata();
NDB_INDEX_TYPE get_index_type(uint idx_no) const;
@@ -594,6 +594,7 @@ private:
NdbTransaction *m_active_trans;
NdbScanOperation *m_active_cursor;
void *m_table;
+ int m_tableVersion;
void *m_table_info;
char m_dbname[FN_HEADLEN];
//char m_schemaname[FN_HEADLEN];
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index 5fb91d9c1a4..cf7240e4dba 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -193,6 +193,7 @@ THD::THD()
variables.pseudo_thread_id= 0;
one_shot_set= 0;
file_id = 0;
+ query_id= 0;
warn_id= 0;
db_charset= global_system_variables.collation_database;
bzero(ha_data, sizeof(ha_data));