author     unknown <monty@donna.mysql.com>   2000-12-08 17:04:57 +0200
committer  unknown <monty@donna.mysql.com>   2000-12-08 17:04:57 +0200
commit     19d406d937d9b133c07acf370a5ba3c53bbc2ed7 (patch)
tree       b351c53cc8d10719148a114a60853aa9347782e0 /sql
parent     1324803d765267696b4d75b5d0a2546dc2686881 (diff)
download   mariadb-git-19d406d937d9b133c07acf370a5ba3c53bbc2ed7.tar.gz
Lots of fixes for BDB tables
Change DROP TABLE to first drop the data, then the .frm file

Docs/manual.texi: Updated TODO and Changelog
include/Makefile.am: Portability fix
mysql-test/misc/select.res: ***MISSING WEAVE***
mysys/mf_iocache2.c: cleanup
scripts/mysqlhotcopy.sh: Fixed --noindices
sql-bench/Results/ATIS-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg: Updated benchmarks
sql-bench/Results/RUN-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg: Updated benchmarks
sql-bench/Results/alter-table-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg: Updated benchmarks
sql-bench/Results/big-tables-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg: Updated benchmarks
sql-bench/Results/connect-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg: Updated benchmarks
sql-bench/Results/create-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg: Updated benchmarks
sql-bench/Results/insert-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg: Updated benchmarks
sql-bench/Results/select-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg: Updated benchmarks
sql-bench/Results/wisconsin-pg-Linux_2.2.14_my_SMP_i686-cmp-mysql,pg: Updated benchmarks
sql-bench/limits/pg.cfg: Updated to new crash-me
sql-bench/server-cfg.sh: Fixes for pg 7.0.2
sql/ha_berkeley.cc: Lots of BDB table fixes
sql/ha_berkeley.h: Lots of BDB table fixes
sql/handler.cc: Change DROP TABLE to first drop the data, then the .frm file
sql/hostname.cc: Fixes for empty hostnames
sql/log.cc: Fixed transaction logging
sql/share/swedish/errmsg.OLD: cleanup
sql/sql_delete.cc: Fixes for logging
sql/sql_insert.cc: Fixes for logging
sql/sql_select.cc: Fixes for BDB tables
sql/sql_table.cc: Change DROP TABLE to first drop the data, then the .frm file
sql/sql_update.cc: Fixes for logging
BitKeeper/etc/ignore: Added scripts/mysqld_multi to the ignore list
BitKeeper/etc/logging_ok: Logging to logging@openlogging.org accepted
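
The headline change is the DROP TABLE ordering: the storage engine's data is dropped first, and the .frm definition file is unlinked only when that step returns 0 or ENOENT. A minimal sketch of that ordering follows, assuming a hypothetical drop_engine_data() stand-in for ha_delete_table() and a single <name>.db data file per table; it is an illustration, not the server's actual code path.

// Sketch only: "drop the data first, then the .frm file".
// drop_engine_data() is a hypothetical stand-in for ha_delete_table().
#include <cerrno>
#include <string>
#include <unistd.h>

static int drop_engine_data(const std::string &base)
{
  // Assume the engine keeps its data in <base>.db, as BDB does here.
  if (unlink((base + ".db").c_str()) == 0)
    return 0;
  return errno;                        // ENOENT simply means "no data to drop"
}

static int drop_table(const std::string &base)
{
  int error = drop_engine_data(base);          // 1) drop the data
  if (error && error != ENOENT)
    return error;                              // data survived: keep the .frm
  if (unlink((base + ".frm").c_str()) &&       // 2) only now drop the .frm
      errno != ENOENT)
    return errno;
  return 0;
}

Dropping in this order means a failed data drop leaves a complete, still-usable table behind instead of orphaned engine files with no definition.
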
Diffstat (limited to 'sql')
-rw-r--r--   sql/ha_berkeley.cc            | 233
-rw-r--r--   sql/ha_berkeley.h             |  14
-rw-r--r--   sql/handler.cc                |  14
-rw-r--r--   sql/hostname.cc               |   8
-rw-r--r--   sql/log.cc                    |   8
-rw-r--r--   sql/share/swedish/errmsg.OLD  |   1
-rw-r--r--   sql/sql_delete.cc             |   2
-rw-r--r--   sql/sql_insert.cc             |   2
-rw-r--r--   sql/sql_select.cc             |   6
-rw-r--r--   sql/sql_table.cc              |  34
-rw-r--r--   sql/sql_update.cc             |   2
11 files changed, 203 insertions, 121 deletions
diff --git a/sql/ha_berkeley.cc b/sql/ha_berkeley.cc
index 694823883ac..fc293c1e8af 100644
--- a/sql/ha_berkeley.cc
+++ b/sql/ha_berkeley.cc
@@ -1,15 +1,15 @@
/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB
-
+
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
-
+
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
+
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
@@ -64,7 +64,7 @@
#include <stdarg.h>
#define HA_BERKELEY_ROWS_IN_TABLE 10000 /* to get optimization right */
-#define HA_BERKELEY_RANGE_COUNT 100
+#define HA_BERKELEY_RANGE_COUNT 100
#define HA_BERKELEY_MAX_ROWS 10000000 /* Max rows in table */
/* extra rows for estimate_number_of_rows() */
#define HA_BERKELEY_EXTRA_ROWS 100
@@ -99,6 +99,7 @@ static byte* bdb_get_key(BDB_SHARE *share,uint *length,
my_bool not_used __attribute__((unused)));
static BDB_SHARE *get_share(const char *table_name, TABLE *table);
static void free_share(BDB_SHARE *share, TABLE *table);
+static int write_status(DB *status_block, char *buff, uint length);
static void update_status(BDB_SHARE *share, TABLE *table);
static void berkeley_noticecall(DB_ENV *db_env, db_notices notice);
@@ -131,7 +132,7 @@ bool berkeley_init(void)
db_env->set_verbose(db_env,
DB_VERB_CHKPOINT | DB_VERB_DEADLOCK | DB_VERB_RECOVERY,
1);
-
+
db_env->set_cachesize(db_env, 0, berkeley_cache_size, 0);
db_env->set_lk_detect(db_env, berkeley_lock_type);
if (berkeley_max_lock)
@@ -139,7 +140,7 @@ bool berkeley_init(void)
if (db_env->open(db_env,
berkeley_home,
- berkeley_init_flags | DB_INIT_LOCK |
+ berkeley_init_flags | DB_INIT_LOCK |
DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN |
DB_CREATE | DB_THREAD, 0666))
{
@@ -271,7 +272,7 @@ berkeley_cmp_hidden_key(DB* file, const DBT *new_key, const DBT *saved_key)
{
ulonglong a=uint5korr((char*) new_key->data);
ulonglong b=uint5korr((char*) saved_key->data);
- return a < b ? -1 : (a > b ? 1 : 0);
+ return a < b ? -1 : (a > b ? 1 : 0);
}
static int
@@ -338,7 +339,7 @@ static bool
berkeley_key_cmp(TABLE *table, KEY *key_info, const char *key, uint key_length)
{
KEY_PART_INFO *key_part= key_info->key_part,
- *end=key_part+key_info->key_parts;
+ *end=key_part+key_info->key_parts;
for ( ; key_part != end && (int) key_length > 0; key_part++)
{
@@ -433,7 +434,6 @@ int ha_berkeley::open(const char *name, int mode, uint test_if_locked)
DBUG_RETURN(1);
}
- info(HA_STATUS_NO_LOCK | HA_STATUS_VARIABLE | HA_STATUS_CONST);
transaction=0;
cursor=0;
key_read=0;
@@ -485,6 +485,7 @@ int ha_berkeley::open(const char *name, int mode, uint test_if_locked)
share->status|=STATUS_PRIMARY_KEY_INIT;
}
get_status();
+ info(HA_STATUS_NO_LOCK | HA_STATUS_VARIABLE | HA_STATUS_CONST);
DBUG_RETURN(0);
}
@@ -611,7 +612,7 @@ void ha_berkeley::unpack_key(char *record, DBT *key, uint index)
{
KEY *key_info=table->key_info+index;
KEY_PART_INFO *key_part= key_info->key_part,
- *end=key_part+key_info->key_parts;
+ *end=key_part+key_info->key_parts;
char *pos=(char*) key->data;
for ( ; key_part != end; key_part++)
@@ -712,7 +713,7 @@ DBT *ha_berkeley::pack_key(DBT *key, uint keynr, char *buff,
continue;
}
key_ptr++;
- }
+ }
buff=key_part->field->keypack(buff,key_ptr+offset,key_part->length);
key_ptr+=key_part->store_length;
key_length-=key_part->store_length;
@@ -817,7 +818,7 @@ int ha_berkeley::key_cmp(uint keynr, const byte * old_row,
}
if (key_part->key_part_flag & (HA_BLOB_PART | HA_VAR_LENGTH))
{
-
+
if (key_part->field->cmp_binary(old_row + key_part->offset,
new_row + key_part->offset,
(ulong) key_part->length))
@@ -855,7 +856,7 @@ int ha_berkeley::update_primary_key(DB_TXN *trans, bool primary_key_changed,
DBUG_RETURN(error); // This should always succeed
if ((error=pack_row(&row, new_row, 0)))
{
- // Out of memory (this shouldn't happen!)
+ // Out of memory (this shouldn't happen!)
(void) file->put(file, trans, &old_key, &row,
key_type[primary_key]);
DBUG_RETURN(error);
@@ -906,7 +907,7 @@ int ha_berkeley::update_row(const byte * old_row, byte * new_row)
else
{
create_key(&prim_key, primary_key, key_buff, new_row);
-
+
if ((primary_key_changed=key_cmp(primary_key, old_row, new_row)))
create_key(&old_prim_key, primary_key, primary_key_buff, old_row);
else
@@ -1007,10 +1008,10 @@ int ha_berkeley::remove_key(DB_TXN *sub_trans, uint keynr, const byte *record,
if (!(error=file->cursor(key_file[keynr], sub_trans, &cursor, 0)))
{
if (!(error=cursor->c_get(cursor,
- (keynr == primary_key ?
+ (keynr == primary_key ?
prim_key :
create_key(&key, keynr, key_buff2, record)),
- (keynr == primary_key ?
+ (keynr == primary_key ?
packed_record : prim_key),
DB_GET_BOTH)))
{ // This shouldn't happen
@@ -1055,7 +1056,7 @@ int ha_berkeley::delete_row(const byte * record)
key_map keys=table->keys_in_use;
DBUG_ENTER("delete_row");
statistic_increment(ha_delete_count,&LOCK_status);
-
+
if ((error=pack_row(&row, record, 0)))
DBUG_RETURN((error));
create_key(&prim_key, primary_key, key_buff, record);
@@ -1106,7 +1107,7 @@ int ha_berkeley::index_init(uint keynr)
dbug_assert(cursor == 0);
if ((error=file->cursor(key_file[keynr], transaction, &cursor,
table->reginfo.lock_type > TL_WRITE_ALLOW_READ ?
- DB_RMW : 0)))
+ 0 : 0)))
cursor=0; // Safety
bzero((char*) &last_key,sizeof(last_key));
DBUG_RETURN(error);
@@ -1269,7 +1270,7 @@ int ha_berkeley::index_prev(byte * buf)
DBUG_RETURN(read_row(cursor->c_get(cursor, &last_key, &row, DB_PREV),
buf, active_index, &row, &last_key, 1));
}
-
+
int ha_berkeley::index_first(byte * buf)
{
@@ -1469,7 +1470,7 @@ int ha_berkeley::external_lock(THD *thd, int lock_type)
{
if (thd->transaction.stmt.bdb_tid)
{
- /*
+ /*
F_UNLOCK is done without a transaction commit / rollback.
This happens if the thread didn't update any rows
We must in this case commit the work to keep the row locks
@@ -1481,7 +1482,7 @@ int ha_berkeley::external_lock(THD *thd, int lock_type)
}
}
DBUG_RETURN(error);
-}
+}
THR_LOCK_DATA **ha_berkeley::store_lock(THD *thd, THR_LOCK_DATA **to,
@@ -1539,6 +1540,7 @@ int ha_berkeley::create(const char *name, register TABLE *form,
char name_buff[FN_REFLEN];
char part[7];
uint index=1;
+ int error=1;
DBUG_ENTER("ha_berkeley::create");
fn_format(name_buff,name,"", ha_berkeley_ext,2 | 4);
@@ -1563,9 +1565,22 @@ int ha_berkeley::create(const char *name, register TABLE *form,
/* Create the status block to save information from last status command */
/* Is DB_BTREE the best option here ? (QUEUE can't be used in sub tables) */
- if (create_sub_table(name_buff,"status",DB_BTREE,0))
- DBUG_RETURN(1);
- DBUG_RETURN(0);
+
+ DB *status_block;
+ if (!db_create(&status_block, db_env, 0))
+ {
+ if (!status_block->open(status_block, name_buff,
+ "status", DB_BTREE, DB_CREATE, 0))
+ {
+ char rec_buff[4+MAX_KEY*4];
+ uint length= 4+ table->keys*4;
+ bzero(rec_buff, length);
+ if (!write_status(status_block, rec_buff, length))
+ error=0;
+ status_block->close(status_block,0);
+ }
+ }
+ DBUG_RETURN(error);
}
@@ -1574,13 +1589,10 @@ int ha_berkeley::delete_table(const char *name)
int error;
char name_buff[FN_REFLEN];
if ((error=db_create(&file, db_env, 0)))
- {
my_errno=error;
- file=0;
- return 1;
- }
- error=file->remove(file,fn_format(name_buff,name,"",ha_berkeley_ext,2 | 4),
- NULL,0);
+ else
+ error=file->remove(file,fn_format(name_buff,name,"",ha_berkeley_ext,2 | 4),
+ NULL,0);
file=0; // Safety
return error;
}
@@ -1659,23 +1671,22 @@ longlong ha_berkeley::get_auto_increment()
table->next_number_key_offset);
/* Store for compare */
memcpy(key_buff2, key_buff, (key_len=last_key.size));
- key_info->handler.bdb_return_if_eq= -1;
- error=read_row(cursor->c_get(cursor, &last_key, &row, DB_SET_RANGE),
- table->record[1], active_index, &row, (DBT*) 0, 0);
+ /* Modify the compare so that we will find the next key */
+ key_info->handler.bdb_return_if_eq= 1;
+ /* We lock the next key as the new key will probl. be on the same page */
+ error=cursor->c_get(cursor, &last_key, &row, DB_SET_RANGE | DB_RMW),
key_info->handler.bdb_return_if_eq= 0;
- if (!error && !berkeley_key_cmp(table, key_info, key_buff2, key_len))
+
+ if (!error || error == DB_NOTFOUND)
{
/*
- Found matching key; Now search after next key, go one step back
- and then we should have found the biggest key with the given
- prefix
+ Now search go one step back and then we should have found the
+ biggest key with the given prefix
*/
- (void) read_row(cursor->c_get(cursor, &last_key, &row, DB_NEXT_NODUP),
- table->record[1], active_index, &row, (DBT*) 0, 0);
- if (read_row(cursor->c_get(cursor, &last_key, &row, DB_PREV),
+ if (read_row(cursor->c_get(cursor, &last_key, &row, DB_PREV | DB_RMW),
table->record[1], active_index, &row, (DBT*) 0, 0) ||
berkeley_key_cmp(table, key_info, key_buff2, key_len))
- error=1; // Something went wrong
+ error=1; // Something went wrong or no such key
}
}
nr=(longlong)
@@ -1718,25 +1729,47 @@ static void print_msg(THD *thd, const char *table_name, const char *op_name,
int ha_berkeley::analyze(THD* thd, HA_CHECK_OPT* check_opt)
{
- DB_BTREE_STAT stat;
+ DB_BTREE_STAT *stat=0;
uint i;
for (i=0 ; i < table->keys ; i++)
{
- file->stat(key_file[i], (void*) &stat, 0, 0);
- share->rec_per_key[i]= stat.bt_ndata / stat.bt_nkeys;
+ if (stat)
+ {
+ free(stat);
+ stat=0;
+ }
+ if (file->stat(key_file[i], (void*) &stat, 0, 0))
+ goto err;
+ share->rec_per_key[i]= (stat->bt_ndata /
+ (stat->bt_nkeys ? stat->bt_nkeys : 1));
}
- /* If hidden primary key */
+ /* A hidden primary key is not in key_file[] */
if (hidden_primary_key)
- file->stat(file, (void*) &stat, 0, 0);
+ {
+ if (stat)
+ {
+ free(stat);
+ stat=0;
+ }
+ if (file->stat(file, (void*) &stat, 0, 0))
+ goto err;
+ }
pthread_mutex_lock(&share->mutex);
- share->rows=stat.bt_ndata;
+ share->rows=stat->bt_ndata;
share->status|=STATUS_BDB_ANALYZE; // Save status on close
share->version++; // Update stat in table
pthread_mutex_unlock(&share->mutex);
- update_status(share,table); // Write status to file
+ update_status(share,table); // Write status to file
+ if (stat)
+ free(stat);
return ((share->status & STATUS_BDB_ANALYZE) ? HA_ADMIN_FAILED :
HA_ADMIN_OK);
+
+err:
+ if (stat)
+ free(stat);
+ return HA_ADMIN_FAILED;
}
int ha_berkeley::optimize(THD* thd, HA_CHECK_OPT* check_opt)
@@ -1749,25 +1782,65 @@ int ha_berkeley::check(THD* thd, HA_CHECK_OPT* check_opt)
{
char name_buff[FN_REFLEN];
int error;
+ DB *tmp_file;
+ DBUG_ENTER("ha_berkeley::check");
+
+ DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED);
+
+#ifdef NOT_YET
+ /*
+ To get this to work we need to ensure that no running transaction is
+ using the table. We also need to create a new environment without
+ locking for this.
+ */
+
+ /* We must open the file again to be able to check it! */
+ if ((error=db_create(&tmp_file, db_env, 0)))
+ {
+ print_msg(thd, table->real_name, "check", "error",
+ "Got error %d creating environment",error);
+ DBUG_RETURN(HA_ADMIN_FAILED);
+ }
+
+ /* Compare the overall structure */
+ tmp_file->set_bt_compare(tmp_file,
+ (hidden_primary_key ? berkeley_cmp_hidden_key :
+ berkeley_cmp_packed_key));
+ file->app_private= (void*) (table->key_info+table->primary_key);
fn_format(name_buff,share->table_name,"", ha_berkeley_ext, 2 | 4);
- if ((error=file->verify(file, name_buff, NullS, (FILE*) 0,
- hidden_primary_key ? 0 : DB_NOORDERCHK)))
+ if ((error=tmp_file->verify(tmp_file, name_buff, NullS, (FILE*) 0,
+ hidden_primary_key ? 0 : DB_NOORDERCHK)))
{
print_msg(thd, table->real_name, "check", "error",
"Got error %d checking file structure",error);
- return HA_ADMIN_CORRUPT;
+ tmp_file->close(tmp_file,0);
+ DBUG_RETURN(HA_ADMIN_CORRUPT);
}
- for (uint i=0 ; i < table->keys ; i++)
+
+ /* Check each index */
+ tmp_file->set_bt_compare(tmp_file, berkeley_cmp_packed_key);
+ for (uint index=0,i=0 ; i < table->keys ; i++)
{
- if ((error=file->verify(key_file[i], name_buff, NullS, (FILE*) 0,
- DB_ORDERCHKONLY)))
+ char part[7];
+ if (i == primary_key)
+ strmov(part,"main");
+ else
+ sprintf(part,"key%02d",++index);
+ tmp_file->app_private= (void*) (table->key_info+i);
+ if ((error=tmp_file->verify(tmp_file, name_buff, part, (FILE*) 0,
+ DB_ORDERCHKONLY)))
{
print_msg(thd, table->real_name, "check", "error",
- "Key %d was not in order",error);
- return HA_ADMIN_CORRUPT;
+ "Key %d was not in order (Error: %d)",
+ index+ test(i >= primary_key),
+ error);
+ tmp_file->close(tmp_file,0);
+ DBUG_RETURN(HA_ADMIN_CORRUPT);
}
}
- return HA_ADMIN_OK;
+ tmp_file->close(tmp_file,0);
+ DBUG_RETURN(HA_ADMIN_OK);
+#endif
}
/****************************************************************************
@@ -1856,8 +1929,8 @@ void ha_berkeley::get_status()
fn_format(name_buff, share->table_name,"", ha_berkeley_ext, 2 | 4);
if (!db_create(&share->status_block, db_env, 0))
{
- if (!share->status_block->open(share->status_block, name_buff,
- "status", DB_BTREE, open_mode, 0))
+ if (share->status_block->open(share->status_block, name_buff,
+ "status", DB_BTREE, open_mode, 0))
{
share->status_block->close(share->status_block, 0);
share->status_block=0;
@@ -1871,15 +1944,16 @@ void ha_berkeley::get_status()
if (!file->cursor(share->status_block, 0, &cursor, 0))
{
DBT row;
- char rec_buff[64],*pos=rec_buff;
+ char rec_buff[64];
bzero((char*) &row,sizeof(row));
bzero((char*) &last_key,sizeof(last_key));
row.data=rec_buff;
- row.size=sizeof(rec_buff);
+ row.ulen=sizeof(rec_buff);
row.flags=DB_DBT_USERMEM;
if (!cursor->c_get(cursor, &last_key, &row, DB_FIRST))
{
uint i;
+ uchar *pos=(uchar*) row.data;
share->org_rows=share->rows=uint4korr(pos); pos+=4;
for (i=0 ; i < table->keys ; i++)
{
@@ -1896,6 +1970,24 @@ void ha_berkeley::get_status()
}
+static int write_status(DB *status_block, char *buff, uint length)
+{
+ DB_TXN *trans;
+ DBT row,key;
+ int error;
+ const char *key_buff="status";
+
+ bzero((char*) &row,sizeof(row));
+ bzero((char*) &key,sizeof(key));
+ row.data=buff;
+ key.data=(void*) key_buff;
+ key.size=sizeof(key_buff);
+ row.size=length;
+ error=status_block->put(status_block, 0, &key, &row, 0);
+ return error;
+}
+
+
static void update_status(BDB_SHARE *share, TABLE *table)
{
DBUG_ENTER("update_status");
@@ -1922,25 +2014,18 @@ static void update_status(BDB_SHARE *share, TABLE *table)
goto end;
}
{
- uint i;
- DBT row,key;
- char rec_buff[4+MAX_KEY*sizeof(ulong)], *pos=rec_buff;
+ char rec_buff[4+MAX_KEY*4], *pos=rec_buff;
const char *key_buff="status";
-
- bzero((char*) &row,sizeof(row));
- bzero((char*) &key,sizeof(key));
- row.data=rec_buff;
- key.data=(void*) key_buff;
- key.size=sizeof(key_buff);
- row.flags=key.flags=DB_DBT_USERMEM;
int4store(pos,share->rows); pos+=4;
- for (i=0 ; i < table->keys ; i++)
+ for (uint i=0 ; i < table->keys ; i++)
{
int4store(pos,share->rec_per_key[i]); pos+=4;
}
- row.size=(uint) (pos-rec_buff);
- (void) share->status_block->put(share->status_block, 0, &key, &row, 0);
+ DBUG_PRINT("info",("updating status for %s",share->table_name));
+ (void) write_status(share->status_block, rec_buff,
+ (uint) (pos-rec_buff));
share->status&= ~STATUS_BDB_ANALYZE;
+ share->org_rows=share->rows;
}
end:
pthread_mutex_unlock(&share->mutex);
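
The new write_status(), update_status() and get_status() all agree on one small record layout for the per-table "status" sub-database: a 4-byte row count followed by one 4-byte rec_per_key value per index (hence rec_buff[4+MAX_KEY*4]), and create() now seeds an all-zero record of that size so get_status() always finds something to read. The sketch below packs and unpacks that shape with little-endian helpers modelled on MySQL's int4store/uint4korr macros; the helpers are reimplementations for illustration, not the originals.

#include <cstdint>
#include <vector>

// Little-endian 4-byte store/fetch, modelled on int4store()/uint4korr().
static void store4(unsigned char *p, uint32_t v)
{
  p[0] = (unsigned char) v;
  p[1] = (unsigned char) (v >> 8);
  p[2] = (unsigned char) (v >> 16);
  p[3] = (unsigned char) (v >> 24);
}

static uint32_t fetch4(const unsigned char *p)
{
  return (uint32_t) p[0] | ((uint32_t) p[1] << 8) |
         ((uint32_t) p[2] << 16) | ((uint32_t) p[3] << 24);
}

// [rows:4][rec_per_key[0]:4]...[rec_per_key[keys-1]:4]
static std::vector<unsigned char>
pack_status(uint32_t rows, const std::vector<uint32_t> &rec_per_key)
{
  std::vector<unsigned char> buff(4 + 4 * rec_per_key.size());
  unsigned char *pos = buff.data();
  store4(pos, rows); pos += 4;
  for (uint32_t v : rec_per_key) { store4(pos, v); pos += 4; }
  return buff;
}

static uint32_t unpack_row_count(const std::vector<unsigned char> &buff)
{
  return buff.size() >= 4 ? fetch4(buff.data()) : 0;   // mirrors get_status()
}
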
diff --git a/sql/ha_berkeley.h b/sql/ha_berkeley.h
index 4ee682664a5..b17d0f041ba 100644
--- a/sql/ha_berkeley.h
+++ b/sql/ha_berkeley.h
@@ -1,15 +1,15 @@
/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB
-
+
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
-
+
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
+
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
@@ -54,7 +54,7 @@ class ha_berkeley: public handler
ulong changed_rows;
uint primary_key,last_dup_key, hidden_primary_key, version;
bool fixed_length_row, fixed_length_primary_key, key_read;
- bool fix_rec_buff_for_blob(ulong length);
+ bool fix_rec_buff_for_blob(ulong length);
byte current_ident[BDB_HIDDEN_PRIMARY_KEY_LENGTH];
ulong max_row_length(const byte *buf);
@@ -82,7 +82,7 @@ class ha_berkeley: public handler
HA_REC_NOT_IN_SEQ |
HA_KEYPOS_TO_RNDPOS | HA_READ_ORDER | HA_LASTKEY_ORDER |
HA_LONGLONG_KEYS | HA_NULL_KEY | HA_HAVE_KEY_READ_ONLY |
- HA_BLOB_KEY | HA_NOT_EXACT_COUNT |
+ HA_BLOB_KEY | HA_NOT_EXACT_COUNT |
HA_PRIMARY_KEY_IN_READ_INDEX | HA_DROP_BEFORE_CREATE |
HA_AUTO_PART_KEY),
last_dup_key((uint) -1),version(0)
@@ -93,8 +93,8 @@ class ha_berkeley: public handler
const char **bas_ext() const;
ulong option_flag() const { return int_option_flag; }
uint max_record_length() const { return HA_MAX_REC_LENGTH; }
- uint max_keys() const { return MAX_KEY-1; }
- uint max_key_parts() const { return MAX_REF_PARTS; }
+ uint max_keys() const { return MAX_KEY-1; }
+ uint max_key_parts() const { return MAX_REF_PARTS; }
uint max_key_length() const { return MAX_KEY_LENGTH; }
uint extra_rec_buf_length() { return BDB_HIDDEN_PRIMARY_KEY_LENGTH; }
ha_rows estimate_number_of_rows();
diff --git a/sql/handler.cc b/sql/handler.cc
index 7c6a3e32ff2..24bf16b3604 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -297,12 +297,16 @@ bool ha_flush_logs()
return result;
}
+/*
+ This should return ENOENT if the file doesn't exists.
+ The .frm file will be deleted only if we return 0 or ENOENT
+*/
int ha_delete_table(enum db_type table_type, const char *path)
{
handler *file=get_new_handler((TABLE*) 0, table_type);
if (!file)
- return -1;
+ return ENOENT;
int error=file->delete_table(path);
delete file;
return error;
@@ -620,12 +624,16 @@ uint handler::get_dup_key(int error)
int handler::delete_table(const char *name)
{
+ int error=0;
for (const char **ext=bas_ext(); *ext ; ext++)
{
if (delete_file(name,*ext,2))
- return my_errno;
+ {
+ if ((error=errno) != ENOENT)
+ break;
+ }
}
- return 0;
+ return error;
}
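
handler::delete_table() now distinguishes "file was already gone" from a real failure: ENOENT is remembered but the loop keeps deleting, while any other error stops the loop and is returned, and it is this return value that lets mysql_rm_table decide whether removing the .frm is safe. A simplified, self-contained version of the loop, using plain unlink() and a made-up extension list in place of delete_file()/bas_ext():

#include <cerrno>
#include <cstdio>
#include <unistd.h>

// Hypothetical extension list; a real handler supplies this via bas_ext().
static const char *table_exts[] = { ".db", nullptr };

static int delete_table_files(const char *name)
{
  int error = 0;
  for (const char **ext = table_exts; *ext; ext++)
  {
    char path[512];
    std::snprintf(path, sizeof(path), "%s%s", name, *ext);
    if (unlink(path))
    {
      if ((error = errno) != ENOENT)
        break;                  // hard error: stop and report it
      // ENOENT: file already gone; remember it but keep deleting the rest
    }
  }
  return error;                 // 0, ENOENT, or the first hard error
}
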
diff --git a/sql/hostname.cc b/sql/hostname.cc
index db8f8349446..1c52a5363d3 100644
--- a/sql/hostname.cc
+++ b/sql/hostname.cc
@@ -81,10 +81,12 @@ static void add_hostname(struct in_addr *in,const char *name)
if ((entry=(host_entry*) malloc(sizeof(host_entry)+length+1)))
{
- char *new_name= (char *) (entry+1);
+ char *new_name;
memcpy_fixed(&entry->ip, &in->s_addr, sizeof(in->s_addr));
- memcpy(new_name, name, length); // Should work even if name == NULL
- new_name[length]=0; // End of string
+ if (length)
+ memcpy(new_name= (char *) (entry+1), name, length+1);
+ else
+ new_name=0;
entry->hostname=new_name;
entry->errors=0;
(void) hostname_cache->add(entry);
diff --git a/sql/log.cc b/sql/log.cc
index d87fdd1b7e5..49e0faf4a7a 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -686,10 +686,9 @@ bool MYSQL_LOG::write(IO_CACHE *cache)
uint length;
my_off_t start_pos=my_b_tell(&log_file);
- if (reinit_io_cache(cache, WRITE_CACHE, 0, 0, 0))
+ if (reinit_io_cache(cache, READ_CACHE, 0, 0, 0))
{
- if (!write_error)
- sql_print_error(ER(ER_ERROR_ON_WRITE), cache->file_name, errno);
+ sql_print_error(ER(ER_ERROR_ON_WRITE), cache->file_name, errno);
goto err;
}
while ((length=my_b_fill(cache)))
@@ -710,8 +709,7 @@ bool MYSQL_LOG::write(IO_CACHE *cache)
}
if (cache->error) // Error on read
{
- if (!write_error)
- sql_print_error(ER(ER_ERROR_ON_READ), cache->file_name, errno);
+ sql_print_error(ER(ER_ERROR_ON_READ), cache->file_name, errno);
goto err;
}
}
diff --git a/sql/share/swedish/errmsg.OLD b/sql/share/swedish/errmsg.OLD
index ea8c2b78e3f..ee3d913e51a 100644
--- a/sql/share/swedish/errmsg.OLD
+++ b/sql/share/swedish/errmsg.OLD
@@ -198,5 +198,4 @@
"Tabell '%-.64s' är crashad och bör repareras med REPAIR TABLE",
"Tabell '%-.64s' är crashad och senast (automatiska?) reparation misslyckades",
"Warning: Några icke transaktionella tabeller kunde inte återställas vid ROLLBACK",
-#ER_TRANS_CACHE_FULL
"Transaktionen krävde mera än 'max_binlog_cache_size' minne. Utöka denna mysqld variabel och försök på nytt",
diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc
index dbcd2640dbc..eab67c835fd 100644
--- a/sql/sql_delete.cc
+++ b/sql/sql_delete.cc
@@ -215,7 +215,7 @@ int mysql_delete(THD *thd,TABLE_LIST *table_list,COND *conds,ha_rows limit,
if (options & OPTION_QUICK)
(void) table->file->extra(HA_EXTRA_NORMAL);
using_transactions=table->file->has_transactions();
- if (deleted && (error == 0 || !using_transactions))
+ if (deleted && (error <= 0 || !using_transactions))
{
mysql_update_log.write(thd,thd->query, thd->query_length);
if (mysql_bin_log.is_open())
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index baaf65d4197..13da095607d 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -256,7 +256,7 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, List<Item> &fields,
else if (table->next_number_field)
id=table->next_number_field->val_int(); // Return auto_increment value
using_transactions=table->file->has_transactions();
- if ((info.copied || info.deleted) && (error == 0 || !using_transactions))
+ if ((info.copied || info.deleted) && (error <= 0 || !using_transactions))
{
mysql_update_log.write(thd, thd->query, thd->query_length);
if (mysql_bin_log.is_open())
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 31fc3f28099..1359551fcf1 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -863,7 +863,8 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds,
else
s->dependent=(table_map) 0;
s->key_dependent=(table_map) 0;
- if ((table->system || table->file->records <= 1L) && ! s->dependent)
+ if ((table->system || table->file->records <= 1) && ! s->dependent &&
+ !(table->file->option_flag() & HA_NOT_EXACT_COUNT))
{
s->type=JT_SYSTEM;
const_table_map|=table->map;
@@ -924,7 +925,8 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds,
{
if (s->dependent & ~(const_table_map)) // All dep. must be constants
continue;
- if (s->table->file->records <= 1L)
+ if (s->table->file->records <= 1L &&
+ !(s->table->file->option_flag() & HA_NOT_EXACT_COUNT))
{ // system table
s->type=JT_SYSTEM;
const_table_map|=s->table->map;
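
The optimizer change exists because BDB only promises approximate row counts (it sets HA_NOT_EXACT_COUNT): a table reporting 0 or 1 rows may actually hold more, so it must not be promoted to a const/system table. Stripped of the join-optimizer context, the guard reduces to a capability-flag test, sketched here with made-up structures and an arbitrary flag value, not the server's real types.

#include <cstdint>

// Made-up stand-ins; the real flags and table objects live in the server.
constexpr uint32_t HA_NOT_EXACT_COUNT = 1u << 9;  // arbitrary bit for the sketch

struct table_stats
{
  uint64_t records;       // row count reported by the handler (may be an estimate)
  uint32_t option_flags;  // handler capability bits
  bool     dependent;     // table depends on other tables in the join
};

// Only tables with an exact count of at most one row qualify as "system" tables.
static bool can_treat_as_system_table(const table_stats &t)
{
  return t.records <= 1 && !t.dependent &&
         !(t.option_flags & HA_NOT_EXACT_COUNT);
}
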
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index dc1ceb1112b..78b202e538c 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -110,24 +110,25 @@ int mysql_rm_table(THD *thd,TABLE_LIST *tables, my_bool if_exists)
table_type=get_table_type(path);
- if (my_delete(path,MYF(0))) /* Delete the table definition file */
+ if (access(path,F_OK))
{
- if (errno != ENOENT || !if_exists)
- {
+ if (!if_exists)
error=1;
- if (errno != ENOENT)
- {
- my_error(ER_CANT_DELETE_FILE,MYF(0),path,errno);
- }
- }
}
else
{
- some_tables_deleted=1;
- *fn_ext(path)=0; // Remove extension;
+ char *end;
+ *(end=fn_ext(path))=0; // Remove extension
error=ha_delete_table(table_type, path);
if (error == ENOENT && if_exists)
error = 0;
+ if (!error || error == ENOENT)
+ {
+ /* Delete the table definition file */
+ strmov(end,reg_ext);
+ if (!(error=my_delete(path,MYF(MY_WME))))
+ some_tables_deleted=1;
+ }
}
if (error)
{
@@ -1427,17 +1428,6 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name,
thd->count_cuted_fields=0; /* Don`t calc cuted fields */
new_table->time_stamp=save_time_stamp;
-#if defined( __WIN__) || defined( __EMX__)
- /*
- We must do the COMMIT here so that we can close and rename the
- temporary table (as windows can't rename open tables)
- */
- if (ha_commit_stmt(thd))
- error=1;
- if (ha_commit(thd))
- error=1;
-#endif
-
if (table->tmp_table)
{
/* We changed a temporary table */
@@ -1556,7 +1546,6 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name,
}
}
-#if !(defined( __WIN__) || defined( __EMX__))
/* The ALTER TABLE is always in it's own transaction */
error = ha_commit_stmt(thd);
if (ha_commit(thd))
@@ -1567,7 +1556,6 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name,
VOID(pthread_mutex_unlock(&LOCK_open));
goto err;
}
-#endif
thd->proc_info="end";
mysql_update_log.write(thd, thd->query,thd->query_length);
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index 54f85eb2ec7..c52370c02fd 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -238,7 +238,7 @@ int mysql_update(THD *thd,TABLE_LIST *table_list,List<Item> &fields,
VOID(table->file->extra(HA_EXTRA_READCHECK));
table->time_stamp=save_time_stamp; // Restore auto timestamp pointer
using_transactions=table->file->has_transactions();
- if (updated && (error == 0 || !using_transactions))
+ if (updated && (error <= 0 || !using_transactions))
{
mysql_update_log.write(thd,thd->query,thd->query_length);
if (mysql_bin_log.is_open())