diff options
-rw-r--r-- | Docs/manual.texi | 11
-rw-r--r-- | bdb/include/db.src | 1
-rw-r--r-- | mysql-test/r/bdb.result | 11
-rw-r--r-- | sql/ha_berkeley.cc | 15
4 files changed, 32 insertions, 6 deletions
diff --git a/Docs/manual.texi b/Docs/manual.texi index 989d083ba00..3d355bd0410 100644 --- a/Docs/manual.texi +++ b/Docs/manual.texi @@ -3145,6 +3145,10 @@ unsubscribe from the @code{myodbc} list, send a message to @email{myodbc-subscribe@@lists.mysql.com} or @email{myodbc-unsubscribe@@lists.mysql.com}. +If you can't get an answer for your questions from the mailing list, one +option is to pay for support from @strong{MySQL AB}, which will put you +in direct contact with @strong{MySQL} developers. @xref{Support}. + The following table shows some @strong{MySQL} mailing in other languages than English. Note that these are not operated by @strong{MySQL AB}, so we can't guarantee the quality on these. @@ -7584,7 +7588,7 @@ You should also add /etc/my.cnf: open_files_limit=8192 @end example -The above should allow @strong{MySQL} to create up to 8192 connections + files. +The above should allow @strong{MySQL} to create up to 8192 connections/files. The @code{STACK_SIZE} constant in LinuxThreads controls the spacing of thread stacks in the address space. It needs to be large enough so that there will @@ -30742,7 +30746,8 @@ method for changing the limit varies widely from system to system. @code{table_cache} is related to @code{max_connections}. For example, for 200 concurrent running connections, you should have a table cache of at least @code{200 * n}, where @code{n} is the maximum number of tables -in a join. +in a join. You also need to reserve some extra file descriptors for +temporary tables and files. The cache of open tables can grow to a maximum of @code{table_cache} (default 64; this can be changed with the @code{-O table_cache=#} @@ -45727,6 +45732,8 @@ Added @code{slave_wait_timeout} for replication. @item Fixed problem with @code{UPDATE} and BDB tables. @item +Fixed hard bug in BDB tables when using key parts. +@item Fixed problem when using the @code{GRANT FILE ON database.* ...}; Previously we added the @code{DROP} privilege for the database. 
@item diff --git a/bdb/include/db.src b/bdb/include/db.src index bcb9033eed1..5226d4e98c6 100644 --- a/bdb/include/db.src +++ b/bdb/include/db.src @@ -119,6 +119,7 @@ struct __db_dbt { u_int32_t dlen; /* RO: get/put record length. */ u_int32_t doff; /* RO: get/put record offset. */ + void *app_private; /* Application-private handle. */ #define DB_DBT_ISSET 0x001 /* Lower level calls set value. */ #define DB_DBT_MALLOC 0x002 /* Return in malloc'd memory. */ #define DB_DBT_PARTIAL 0x004 /* Partial put/get. */ diff --git a/mysql-test/r/bdb.result b/mysql-test/r/bdb.result index c88b7375aec..89d9c56b3b0 100644 --- a/mysql-test/r/bdb.result +++ b/mysql-test/r/bdb.result @@ -511,3 +511,14 @@ id id3 100 2 KINMU_DATE KINMU_DATE +a b +1 1 +1 2 +a b a b +1 1 1 1 +1 1 1 2 +1 2 1 1 +1 2 1 2 +a b +1 1 +1 2 diff --git a/sql/ha_berkeley.cc b/sql/ha_berkeley.cc index d2b3ef62865..6907da855b9 100644 --- a/sql/ha_berkeley.cc +++ b/sql/ha_berkeley.cc @@ -351,7 +351,8 @@ berkeley_cmp_hidden_key(DB* file, const DBT *new_key, const DBT *saved_key) static int berkeley_cmp_packed_key(DB *file, const DBT *new_key, const DBT *saved_key) { - KEY *key= (KEY*) (file->app_private); + KEY *key= (new_key->app_private ? (KEY*) new_key->app_private : + (KEY*) (file->app_private)); char *new_key_ptr= (char*) new_key->data; char *saved_key_ptr=(char*) saved_key->data; KEY_PART_INFO *key_part= key->key_part, *end=key_part+key->key_parts; @@ -388,7 +389,8 @@ berkeley_cmp_packed_key(DB *file, const DBT *new_key, const DBT *saved_key) static int berkeley_cmp_fix_length_key(DB *file, const DBT *new_key, const DBT *saved_key) { - KEY *key=(KEY*) (file->app_private); + KEY *key= (new_key->app_private ? 
(KEY*) new_key->app_private : + (KEY*) (file->app_private)); char *new_key_ptr= (char*) new_key->data; char *saved_key_ptr=(char*) saved_key->data; KEY_PART_INFO *key_part= key->key_part, *end=key_part+key->key_parts; @@ -730,9 +732,9 @@ DBT *ha_berkeley::create_key(DBT *key, uint keynr, char *buff, const byte *record, int key_length) { bzero((char*) key,sizeof(*key)); - if (hidden_primary_key && keynr == primary_key) { + /* We don't need to set app_private here */ key->data=current_ident; key->size=BDB_HIDDEN_PRIMARY_KEY_LENGTH; return key; @@ -744,6 +746,7 @@ DBT *ha_berkeley::create_key(DBT *key, uint keynr, char *buff, DBUG_ENTER("create_key"); key->data=buff; + key->app_private= key_info; for ( ; key_part != end && key_length > 0; key_part++) { if (key_part->null_bit) @@ -777,10 +780,11 @@ DBT *ha_berkeley::pack_key(DBT *key, uint keynr, char *buff, KEY *key_info=table->key_info+keynr; KEY_PART_INFO *key_part=key_info->key_part; KEY_PART_INFO *end=key_part+key_info->key_parts; - DBUG_ENTER("pack_key2"); + DBUG_ENTER("bdb:pack_key"); bzero((char*) key,sizeof(*key)); key->data=buff; + key->app_private= (void*) key_info; for (; key_part != end && (int) key_length > 0 ; key_part++) { @@ -1373,6 +1377,7 @@ int ha_berkeley::read_row(int error, char *buf, uint keynr, DBT *row, bzero((char*) &key,sizeof(key)); key.data=key_buff; key.size=row->size; + key.app_private= (void*) (table->key_info+primary_key); memcpy(key_buff,row->data,row->size); /* Read the data into current_row */ current_row.flags=DB_DBT_REALLOC; @@ -1537,6 +1542,7 @@ int ha_berkeley::rnd_next(byte *buf) DBT *ha_berkeley::get_pos(DBT *to, byte *pos) { + /* We don't need to set app_private here */ bzero((char*) to,sizeof(*to)); to->data=pos; @@ -1950,6 +1956,7 @@ longlong ha_berkeley::get_auto_increment() table->next_number_key_offset); /* Store for compare */ memcpy(old_key.data=key_buff2, key_buff, (old_key.size=last_key.size)); + old_key.app_private=(void*) key_info; error=1; { /* Modify the compare 
so that we will find the next key */