author | unknown <joreland@mysql.com> | 2004-11-08 11:06:36 +0100
---|---|---
committer | unknown <joreland@mysql.com> | 2004-11-08 11:06:36 +0100
commit | 3b38a854e0fed2edaf6ab848959a2795445624f5 |
tree | edc4cc564f38328f71740232e5dcb98e92c658f6 /sql/ha_ndbcluster.cc |
parent | 0afed4c3eb914788528087405022878a2ef5bc95 |
ndb: bug#6451
1) fix so that missing blob tables don't prevent the table from being dropped
2) decrease the size of the blob part if the record length exceeds the max length (sketched below)
3) add a test case for a table without a corresponding blob table
4) init scan counters when sending SCAN_TABREQ
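
To make fix (2) concrete, here is a minimal standalone sketch of the part-size clamp that the patch applies in ha_ndbcluster::create(); the constant's value and the helper's name are assumptions for illustration, not taken from this commit:

// Illustrative sketch only, not the patch itself: shrink a blob column's
// part size so that pk words + blob-part words + 7 overhead words still
// fit in one NDB tuple.  The constant below is an assumed example value.
static const int NDB_MAX_TUPLE_SIZE_IN_WORDS= 2013;   /* assumed */

// part_size_bytes: current blob part size in bytes
// pk_length_words: size of the primary key in 32-bit words
static int clamp_blob_part_size(int part_size_bytes, int pk_length_words)
{
  int size_words= pk_length_words + (part_size_bytes + 3) / 4 + 7;
  if (size_words > NDB_MAX_TUPLE_SIZE_IN_WORDS &&
      (pk_length_words + 7) < NDB_MAX_TUPLE_SIZE_IN_WORDS)
  {
    // Reduce the part so a row of the blob part table fits.
    int new_words= NDB_MAX_TUPLE_SIZE_IN_WORDS - pk_length_words - 7;
    return 4 * new_words;
  }
  // Either it already fits, or the primary key alone is too large and
  // table creation will fail later anyway.
  return part_size_bytes;
}

For example, with a 20-byte primary key (5 words) and a part size of 8192 bytes, the clamp would return 4 * (2013 - 5 - 7) = 8004 bytes under the assumed maximum.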
mysql-test/r/ndb_autodiscover.result:
test case for a table without corresponding blob tables
mysql-test/r/ndb_autodiscover2.result:
test case for a table without corresponding blob tables
mysql-test/t/ndb_autodiscover.test:
test case for a table without corresponding blob tables
mysql-test/t/ndb_autodiscover2.test:
test case for a table without corresponding blob tables
ndb/include/ndbapi/NdbDictionary.hpp:
Add a non-const getColumn()
ndb/src/ndbapi/NdbDictionary.cpp:
Add a non-const getColumn()
ndb/src/ndbapi/NdbDictionaryImpl.hpp:
Allow a "partial" getTable, which enables dropping of tables that failed
to create their blob tables (see the sketch after these notes)
ndb/src/ndbapi/NdbScanOperation.cpp:
Init counter when sending SCAN_TABREQ
sql/ha_ndbcluster.cc:
Make sure that blobs don't have too big a part size
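
To show what the "partial" getTable change is meant to enable on the application side, here is a hedged NDB API sketch; the helper name is hypothetical and the setup of the Ndb object is omitted:

#include <NdbApi.hpp>

// Hypothetical helper: with this patch, Dictionary::getTable() can hand
// back a table object even when its blob part tables are missing, which
// is enough for dropTable() to remove the main table.
static int drop_table_ignoring_missing_blobs(Ndb &ndb, const char *name)
{
  NdbDictionary::Dictionary *dict= ndb.getDictionary();
  const NdbDictionary::Table *tab= dict->getTable(name);
  if (tab == 0)
    return dict->getNdbError().code;   // table really does not exist
  return dict->dropTable(name);        // works even without blob tables
}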
Diffstat (limited to 'sql/ha_ndbcluster.cc')
-rw-r--r-- | sql/ha_ndbcluster.cc | 37 |
1 file changed, 36 insertions, 1 deletion
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 838cf69855a..ddc073f79eb 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -3307,7 +3307,7 @@ int ha_ndbcluster::create(const char *name,
 {
   NDBTAB tab;
   NDBCOL col;
-  uint pack_length, length, i;
+  uint pack_length, length, i, pk_length= 0;
   const void *data, *pack_data;
   const char **key_names= form->keynames.type_names;
   char name2[FN_HEADLEN];
@@ -3354,6 +3354,8 @@ int ha_ndbcluster::create(const char *name,
     if ((my_errno= create_ndb_column(col, field, info)))
       DBUG_RETURN(my_errno);
     tab.addColumn(col);
+    if(col.getPrimaryKey())
+      pk_length += (field->pack_length() + 3) / 4;
   }
 
   // No primary key, create shadow key as 64 bit, auto increment
@@ -3367,6 +3369,39 @@ int ha_ndbcluster::create(const char *name,
     col.setPrimaryKey(TRUE);
     col.setAutoIncrement(TRUE);
     tab.addColumn(col);
+    pk_length += 2;
+  }
+
+  // Make sure that blob tables don't have to big part size
+  for (i= 0; i < form->fields; i++)
+  {
+    /**
+     * The extra +7 concists
+     * 2 - words from pk in blob table
+     * 5 - from extra words added by tup/dict??
+     */
+    switch (form->field[i]->real_type()) {
+    case MYSQL_TYPE_BLOB:
+    case MYSQL_TYPE_MEDIUM_BLOB:
+    case MYSQL_TYPE_LONG_BLOB:
+    {
+      NdbDictionary::Column * col = tab.getColumn(i);
+      int size = pk_length + (col->getPartSize()+3)/4 + 7;
+      if(size > NDB_MAX_TUPLE_SIZE_IN_WORDS &&
+         (pk_length+7) < NDB_MAX_TUPLE_SIZE_IN_WORDS)
+      {
+        size = NDB_MAX_TUPLE_SIZE_IN_WORDS - pk_length - 7;
+        col->setPartSize(4*size);
+      }
+      /**
+       * If size > NDB_MAX and pk_length+7 >= NDB_MAX
+       * then the table can't be created anyway, so skip
+       * changing part size, and have error later
+       */
+    }
+    default:
+      break;
+    }
   }
 
   if ((my_errno= check_ndb_connection()))