Diffstat (limited to 'sql')
-rw-r--r--  sql/examples/ha_archive.cc  | 69
-rw-r--r--  sql/examples/ha_archive.h   |  2
-rw-r--r--  sql/ha_ndbcluster.cc        | 82
-rw-r--r--  sql/ha_ndbcluster.h         |  3
-rw-r--r--  sql/item.cc                 |  2
-rw-r--r--  sql/sql_class.cc            |  1
-rw-r--r--  sql/sql_class.h             | 13
7 files changed, 132 insertions(+), 40 deletions(-)
diff --git a/sql/examples/ha_archive.cc b/sql/examples/ha_archive.cc
index c004330932c..e71ae05734a 100644
--- a/sql/examples/ha_archive.cc
+++ b/sql/examples/ha_archive.cc
@@ -70,7 +70,6 @@
Allow users to set compression level.
Add truncate table command.
Implement versioning, should be easy.
- Implement optimize so we can fix broken tables.
Allow for errors, find a way to mark bad rows.
See if during an optimize you can make the table smaller.
Talk to the gzip guys, come up with a writable format so that updates are doable
@@ -88,6 +87,7 @@ static int archive_init= 0;
/* The file extension */
#define ARZ ".ARZ"
+#define ARN ".ARN"
/*
Used for hash table that tracks open tables.
@@ -117,7 +117,7 @@ static ARCHIVE_SHARE *get_share(const char *table_name, TABLE *table)
if (!archive_init)
{
VOID(pthread_mutex_init(&archive_mutex,MY_MUTEX_INIT_FAST));
- if (!hash_init(&archive_open_tables,system_charset_info,32,0,0,
+ if (hash_init(&archive_open_tables,system_charset_info,32,0,0,
(hash_get_key) archive_get_key,0,0))
{
pthread_mutex_unlock(&LOCK_mysql_create_db);
@@ -205,7 +205,7 @@ static int free_share(ARCHIVE_SHARE *share)
We just implement one additional file extension.
*/
const char **ha_archive::bas_ext() const
-{ static const char *ext[]= { ARZ, NullS }; return ext; }
+{ static const char *ext[]= { ARZ, ARN, NullS }; return ext; }
/*
@@ -322,6 +322,11 @@ err:
/*
Look at ha_archive::open() for an explanation of the row format.
Here we just write out the row.
+
+ Wondering about start_bulk_insert()? We don't implement it for
+ archive, since archive is already optimized for lots of writes. The
+ only saving from implementing start_bulk_insert() would be that we
+ could skip setting dirty to true on each row.
*/
int ha_archive::write_row(byte * buf)
{
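A minimal sketch of what the comment above alludes to (hypothetical; this
patch does not add the method): a start_bulk_insert() override could mark
the share dirty once up front, so write_row() could skip the per-row store.

    void ha_archive::start_bulk_insert(ha_rows rows)
    {
      pthread_mutex_lock(&share->mutex);
      share->dirty= TRUE;                  /* flag once, not per row */
      pthread_mutex_unlock(&share->mutex);
    }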
@@ -380,17 +385,7 @@ int ha_archive::rnd_init(bool scan)
pthread_mutex_lock(&share->mutex);
if (share->dirty == TRUE)
{
-/* I was having problems with OSX, but it worked for 10.3 so I am wrapping this with and ifdef */
-#ifdef BROKEN_GZFLUSH
- gzclose(share->archive_write);
- if ((share->archive_write= gzopen(share->data_file_name, "ab")) == NULL)
- {
- pthread_mutex_unlock(&share->mutex);
- DBUG_RETURN(errno ? errno : -1);
- }
-#else
gzflush(share->archive_write, Z_SYNC_FLUSH);
-#endif
share->dirty= FALSE;
}
pthread_mutex_unlock(&share->mutex);
@@ -504,6 +499,54 @@ int ha_archive::rnd_pos(byte * buf, byte *pos)
DBUG_RETURN(get_row(buf));
}
+/*
+ The table can become fragmented if data was inserted, read, and then
+ inserted again. What we do is open up the file and recompress it completely.
+ */
+int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
+{
+ DBUG_ENTER("ha_archive::optimize");
+ int read; // Bytes read, gzread() returns int
+ gzFile reader, writer;
+ char block[IO_SIZE];
+ char writer_filename[FN_REFLEN];
+
+ /* Let's create a file to contain the new data */
+ fn_format(writer_filename,share->table_name,"",ARN, MY_REPLACE_EXT|MY_UNPACK_FILENAME);
+
+ /* Closing the write handle flushes any data still buffered */
+ gzclose(share->archive_write);
+
+ if ((reader= gzopen(share->data_file_name, "rb")) == NULL)
+ DBUG_RETURN(-1);
+
+ if ((writer= gzopen(writer_filename, "wb")) == NULL)
+ {
+ gzclose(reader);
+ DBUG_RETURN(-1);
+ }
+
+ while ((read= gzread(reader, block, IO_SIZE)) > 0) /* stop on EOF or error (-1) */
+ gzwrite(writer, block, read);
+
+ gzclose(reader);
+ gzclose(writer);
+
+ my_rename(writer_filename,share->data_file_name,MYF(0));
+
+ /*
+ We reopen the file in case some IO is waiting to go through.
+ In theory the table is closed right after this operation,
+ but it is possible for IO to still happen.
+ I may be being a bit too paranoid right here.
+ */
+ if ((share->archive_write= gzopen(share->data_file_name, "ab")) == NULL)
+ DBUG_RETURN(errno ? errno : -1);
+ share->dirty= FALSE;
+
+ DBUG_RETURN(0);
+}
+
/******************************************************************************
Everything below here is default, please look at ha_example.cc for
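With the matching declaration added to ha_archive.h below, OPTIMIZE TABLE on
an ARCHIVE table now runs this recompression pass: the .ARZ data is streamed
into a fresh .ARN file and renamed back into place. Presumably the
fragmentation the comment describes comes from the Z_SYNC_FLUSH points that
rnd_init() writes, which compress poorly; a single uninterrupted rewrite
avoids them.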
diff --git a/sql/examples/ha_archive.h b/sql/examples/ha_archive.h
index f08353a5d6c..cf7becc5bc0 100644
--- a/sql/examples/ha_archive.h
+++ b/sql/examples/ha_archive.h
@@ -112,7 +112,7 @@ public:
int external_lock(THD *thd, int lock_type);
ha_rows records_in_range(uint inx, key_range *min_key, key_range *max_key);
int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info);
-
+ int optimize(THD* thd, HA_CHECK_OPT* check_opt);
THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
enum thr_lock_type lock_type);
};
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index da32476ae74..4e474568671 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -122,6 +122,8 @@ static const err_code_mapping err_map[]=
{ 827, HA_ERR_RECORD_FILE_FULL },
{ 832, HA_ERR_RECORD_FILE_FULL },
+ { 0, 1 },
+
{ -1, -1 }
};
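For context, a sketch of the translation this table presumably feeds (struct
field names assumed; not part of the patch). With the early return in
ndb_err() removed below, an NdbError carrying code 0 now reaches the mapping,
and the new { 0, 1 } entry turns it into a non-zero MySQL error instead of a
silent success:

    /* Assumed shape of the lookup; { -1, -1 } terminates the table */
    static int ndb_to_mysql_error_sketch(const NdbError *err)
    {
      for (const err_code_mapping *m= err_map; m->ndb_err != -1; m++)
        if (m->ndb_err == err->code)
          return m->my_err;
      return err->code;                 /* unmapped codes pass through */
    }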
@@ -173,7 +175,7 @@ void ha_ndbcluster::records_update()
DBUG_PRINT("info", ("id=%d, no_uncommitted_rows_count=%d",
((const NDBTAB *)m_table)->getTableId(),
info->no_uncommitted_rows_count));
- if (info->records == ~(ha_rows)0)
+ // if (info->records == ~(ha_rows)0)
{
Uint64 rows;
if(ndb_get_table_statistics(m_ndb, m_tabname, &rows, 0) == 0){
@@ -246,8 +248,6 @@ int ha_ndbcluster::ndb_err(NdbConnection *trans)
{
int res;
const NdbError err= trans->getNdbError();
- if (!err.code)
- return 0; // Don't log things to DBUG log if no error
DBUG_ENTER("ndb_err");
ERR_PRINT(err);
@@ -283,10 +283,11 @@ bool ha_ndbcluster::get_error_message(int error,
DBUG_ENTER("ha_ndbcluster::get_error_message");
DBUG_PRINT("enter", ("error: %d", error));
- if (!m_ndb)
+ Ndb *ndb= ((Thd_ndb*)current_thd->transaction.thd_ndb)->ndb;
+ if (!ndb)
DBUG_RETURN(false);
- const NdbError err= m_ndb->getNdbError(error);
+ const NdbError err= ndb->getNdbError(error);
bool temporary= err.status==NdbError::TemporaryError;
buf->set(err.message, strlen(err.message), &my_charset_bin);
DBUG_PRINT("exit", ("message: %s, temporary: %d", buf->ptr(), temporary));
@@ -516,7 +517,7 @@ int ha_ndbcluster::get_ndb_blobs_value(NdbBlob *last_ndb_blob)
*/
int ha_ndbcluster::get_ndb_value(NdbOperation *ndb_op, Field *field,
- uint fieldnr)
+ uint fieldnr, byte* buf)
{
DBUG_ENTER("get_ndb_value");
DBUG_PRINT("enter", ("fieldnr: %d flags: %o", fieldnr,
@@ -524,12 +525,15 @@ int ha_ndbcluster::get_ndb_value(NdbOperation *ndb_op, Field *field,
if (field != NULL)
{
+ DBUG_ASSERT(buf);
if (ndb_supported_type(field->type()))
{
DBUG_ASSERT(field->ptr != NULL);
if (! (field->flags & BLOB_FLAG))
- {
- m_value[fieldnr].rec= ndb_op->getValue(fieldnr, field->ptr);
+ {
+ byte *field_buf= buf + (field->ptr - table->record[0]);
+ m_value[fieldnr].rec= ndb_op->getValue(fieldnr,
+ field_buf);
DBUG_RETURN(m_value[fieldnr].rec == NULL);
}
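The pointer arithmetic above redirects the read into the caller's buffer
while keeping the field at the same offset it has inside table->record[0].
A self-contained illustration (hypothetical helper, not part of the patch):

    #include <cstddef>

    /* A field at offset N inside record[0] sits at offset N inside any
       other buffer laid out as a row image (e.g. the new_data buffer
       passed in from complemented_pk_read()). */
    static char *field_in_buf(char *buf, const char *record0,
                              const char *field_ptr)
    {
      ptrdiff_t offset= field_ptr - record0; /* field's offset in the row */
      return buf + offset;                   /* same field inside 'buf'   */
    }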
@@ -603,7 +607,7 @@ int ha_ndbcluster::get_metadata(const char *path)
DBUG_ENTER("get_metadata");
DBUG_PRINT("enter", ("m_tabname: %s, path: %s", m_tabname, path));
- if (!(tab= dict->getTable(m_tabname, &m_table_info)))
+ if (!(tab= dict->getTable(m_tabname)))
ERR_RETURN(dict->getNdbError());
DBUG_PRINT("info", ("Table schema version: %d", tab->getObjectVersion()));
@@ -651,8 +655,8 @@ int ha_ndbcluster::get_metadata(const char *path)
if (error)
DBUG_RETURN(error);
- // All checks OK, lets use the table
- m_table= (void*)tab;
+ m_table= NULL;
+ m_table_info= NULL;
DBUG_RETURN(build_index_list(table, ILBP_OPEN));
}
@@ -767,6 +771,7 @@ void ha_ndbcluster::release_metadata()
DBUG_PRINT("enter", ("m_tabname: %s", m_tabname));
m_table= NULL;
+ m_table_info= NULL;
// Release index list
for (i= 0; i < MAX_KEY; i++)
@@ -947,7 +952,7 @@ int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf)
ERR_RETURN(trans->getNdbError());
// Read key at the same time, for future reference
- if (get_ndb_value(op, NULL, no_fields))
+ if (get_ndb_value(op, NULL, no_fields, NULL))
ERR_RETURN(trans->getNdbError());
}
else
@@ -964,7 +969,7 @@ int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf)
if ((thd->query_id == field->query_id) ||
retrieve_all_fields)
{
- if (get_ndb_value(op, field, i))
+ if (get_ndb_value(op, field, i, buf))
ERR_RETURN(trans->getNdbError());
}
else
@@ -1018,7 +1023,7 @@ int ha_ndbcluster::complemented_pk_read(const byte *old_data, byte *new_data)
if (!(field->flags & PRI_KEY_FLAG) &&
(thd->query_id != field->query_id))
{
- if (get_ndb_value(op, field, i))
+ if (get_ndb_value(op, field, i, new_data))
ERR_RETURN(trans->getNdbError());
}
}
@@ -1081,7 +1086,7 @@ int ha_ndbcluster::unique_index_read(const byte *key,
if ((thd->query_id == field->query_id) ||
(field->flags & PRI_KEY_FLAG))
{
- if (get_ndb_value(op, field, i))
+ if (get_ndb_value(op, field, i, buf))
ERR_RETURN(op->getNdbError());
}
else
@@ -1480,7 +1485,7 @@ int ha_ndbcluster::define_read_attrs(byte* buf, NdbOperation* op)
(field->flags & PRI_KEY_FLAG) ||
retrieve_all_fields)
{
- if (get_ndb_value(op, field, i))
+ if (get_ndb_value(op, field, i, buf))
ERR_RETURN(op->getNdbError());
}
else
@@ -1499,7 +1504,7 @@ int ha_ndbcluster::define_read_attrs(byte* buf, NdbOperation* op)
if (!tab->getColumn(hidden_no))
DBUG_RETURN(1);
#endif
- if (get_ndb_value(op, NULL, hidden_no))
+ if (get_ndb_value(op, NULL, hidden_no, NULL))
ERR_RETURN(op->getNdbError());
}
@@ -1521,6 +1526,11 @@ int ha_ndbcluster::write_row(byte *record)
NdbOperation *op;
int res;
DBUG_ENTER("write_row");
+
+ if(m_ignore_dup_key_not_supported)
+ {
+ DBUG_RETURN(HA_ERR_WRONG_COMMAND);
+ }
statistic_increment(ha_write_count,&LOCK_status);
if (table->timestamp_default_now)
@@ -2385,7 +2395,17 @@ void ha_ndbcluster::info(uint flag)
if (flag & HA_STATUS_VARIABLE)
{
DBUG_PRINT("info", ("HA_STATUS_VARIABLE"));
- records_update();
+ if (m_table_info)
+ {
+ records_update();
+ }
+ else
+ {
+ Uint64 rows;
+ if(ndb_get_table_statistics(m_ndb, m_tabname, &rows, 0) == 0){
+ records= rows;
+ }
+ }
}
if (flag & HA_STATUS_ERRKEY)
{
@@ -2479,14 +2499,20 @@ int ha_ndbcluster::extra(enum ha_extra_function operation)
break;
case HA_EXTRA_IGNORE_DUP_KEY: /* Dup keys don't rollback everything*/
DBUG_PRINT("info", ("HA_EXTRA_IGNORE_DUP_KEY"));
-
- DBUG_PRINT("info", ("Turning ON use of write instead of insert"));
- m_use_write= TRUE;
+ if (current_thd->lex->sql_command == SQLCOM_REPLACE)
+ {
+ DBUG_PRINT("info", ("Turning ON use of write instead of insert"));
+ m_use_write= TRUE;
+ } else
+ {
+ m_ignore_dup_key_not_supported= TRUE;
+ }
break;
case HA_EXTRA_NO_IGNORE_DUP_KEY:
DBUG_PRINT("info", ("HA_EXTRA_NO_IGNORE_DUP_KEY"));
DBUG_PRINT("info", ("Turning OFF use of write instead of insert"));
m_use_write= false;
+ m_ignore_dup_key_not_supported= false;
break;
case HA_EXTRA_RETRIEVE_ALL_COLS: /* Retrieve all columns, not just those
where field->query_id is the same as
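Together with the write_row() check above, the flag protocol for
duplicate-key handling now works as sketched here (hypothetical caller,
shown only to illustrate the two paths):

    file->extra(HA_EXTRA_IGNORE_DUP_KEY);
    /* REPLACE:       m_use_write is set, so writes become NDB upserts.
       INSERT IGNORE: m_ignore_dup_key_not_supported is set, so the next
                      write_row() fails fast with HA_ERR_WRONG_COMMAND. */
    int error= file->write_row(record);
    file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);   /* clears both flags */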
@@ -2766,6 +2792,16 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
// Start of transaction
retrieve_all_fields= FALSE;
ops_pending= 0;
+ {
+ NDBDICT *dict= m_ndb->getDictionary();
+ const NDBTAB *tab;
+ void *tab_info;
+ if (!(tab= dict->getTable(m_tabname, &tab_info)))
+ ERR_RETURN(dict->getNdbError());
+ DBUG_PRINT("info", ("Table schema version: %d", tab->getObjectVersion()));
+ m_table= (void *)tab;
+ m_table_info= tab_info;
+ }
no_uncommitted_rows_init(thd);
}
else
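The effect of this hunk, together with the get_metadata() and
release_metadata() changes above: m_table and m_table_info are no longer
cached for the lifetime of the handler but fetched from the NDB dictionary
at the start of each transaction and cleared at its end, so a schema change
made elsewhere is, presumably, picked up on the next statement rather than
going unnoticed.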
@@ -2788,6 +2824,8 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
thd->transaction.stmt.ndb_tid= 0;
}
}
+ m_table= NULL;
+ m_table_info= NULL;
if (m_active_trans)
DBUG_PRINT("warning", ("m_active_trans != NULL"));
if (m_active_cursor)
@@ -3273,6 +3311,7 @@ int ha_ndbcluster::alter_table_name(const char *from, const char *to)
ERR_RETURN(dict->getNdbError());
m_table= NULL;
+ m_table_info= NULL;
DBUG_RETURN(0);
}
@@ -3364,6 +3403,7 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg):
HA_NO_PREFIX_CHAR_KEYS),
m_share(0),
m_use_write(false),
+ m_ignore_dup_key_not_supported(false),
retrieve_all_fields(FALSE),
rows_to_insert(1),
rows_inserted(0),
diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h
index a25d3e18310..c0ef172413f 100644
--- a/sql/ha_ndbcluster.h
+++ b/sql/ha_ndbcluster.h
@@ -211,7 +211,7 @@ class ha_ndbcluster: public handler
int set_ndb_key(NdbOperation*, Field *field,
uint fieldnr, const byte* field_ptr);
int set_ndb_value(NdbOperation*, Field *field, uint fieldnr);
- int get_ndb_value(NdbOperation*, Field *field, uint fieldnr);
+ int get_ndb_value(NdbOperation*, Field *field, uint fieldnr, byte*);
friend int g_get_ndb_blobs_value(NdbBlob *ndb_blob, void *arg);
int get_ndb_blobs_value(NdbBlob *last_ndb_blob);
int set_primary_key(NdbOperation *op, const byte *key);
@@ -245,6 +245,7 @@ class ha_ndbcluster: public handler
typedef union { NdbRecAttr *rec; NdbBlob *blob; void *ptr; } NdbValue;
NdbValue m_value[NDB_MAX_ATTRIBUTES_IN_TABLE];
bool m_use_write;
+ bool m_ignore_dup_key_not_supported;
bool retrieve_all_fields;
ha_rows rows_to_insert;
ha_rows rows_inserted;
diff --git a/sql/item.cc b/sql/item.cc
index 8233d050783..14136435a50 100644
--- a/sql/item.cc
+++ b/sql/item.cc
@@ -891,7 +891,7 @@ int Item_param::save_in_field(Field *field, bool no_conversions)
return field->store(str_value.ptr(), str_value.length(),
str_value.charset());
case NULL_VALUE:
- return set_field_to_null(field);
+ return set_field_to_null_with_conversions(field, no_conversions);
case NO_VALUE:
default:
DBUG_ASSERT(0);
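Presumably this makes binding NULL to a prepared-statement parameter follow
the same NOT NULL conversion rules as ordinary field assignment:
set_field_to_null_with_conversions() can fall back to the column's implicit
default when no_conversions permits, where the old set_field_to_null() call
could not.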
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index 2074f0f5d40..6cf01896b03 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -328,6 +328,7 @@ void THD::change_user(void)
cleanup();
cleanup_done= 0;
init();
+ stmt_map.reset();
hash_init(&user_vars, &my_charset_bin, USER_VARS_HASH_SIZE, 0, 0,
(hash_get_key) get_var_key,
(hash_free_key) free_user_var, 0);
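Without this, prepared statements created before COM_CHANGE_USER would
survive into the new user's session; the reset() added to Statement_map in
sql_class.h below destroys them all along with the rest of the
per-connection state.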
diff --git a/sql/sql_class.h b/sql/sql_class.h
index 89bf2dde12e..68d187168d3 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -578,7 +578,7 @@ public:
assignment in Statement::Statement)
Non-empty statement names are unique too: attempt to insert a new statement
with duplicate name causes older statement to be deleted
-
+
Statements are auto-deleted when they are removed from the map and when the
map is deleted.
*/
@@ -587,7 +587,7 @@ class Statement_map
{
public:
Statement_map();
-
+
int insert(Statement *statement);
Statement *find_by_name(LEX_STRING *name)
@@ -620,11 +620,18 @@ public:
}
hash_delete(&st_hash, (byte *) statement);
}
+ /* Erase all statements (calls Statement destructors) */
+ void reset()
+ {
+ hash_reset(&names_hash);
+ hash_reset(&st_hash);
+ last_found_statement= 0;
+ }
~Statement_map()
{
- hash_free(&st_hash);
hash_free(&names_hash);
+ hash_free(&st_hash);
}
private:
HASH st_hash;
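The ordering in both reset() and the destructor (names_hash before st_hash)
matters given the ownership split the reset() comment implies: st_hash owns
the Statement objects and destroys them through its free callback, while
names_hash is only a by-name index over a subset of them. A standalone
sketch of that scheme (hypothetical types, not MySQL code):

    #include <map>
    #include <string>

    struct Statement { unsigned long id; std::string name; };

    struct StatementMapSketch
    {
      std::map<unsigned long, Statement*> owning;   /* like st_hash    */
      std::map<std::string, Statement*>   by_name;  /* like names_hash */

      void reset()
      {
        by_name.clear();                /* drop the non-owning index  */
        for (auto &p : owning)          /* ...before destroying the   */
          delete p.second;              /* statements it pointed into */
        owning.clear();
      }
      ~StatementMapSketch() { reset(); }
    };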