author     unknown <tomas@whalegate.ndb.mysql.com>  2007-11-02 23:33:25 +0100
committer  unknown <tomas@whalegate.ndb.mysql.com>  2007-11-02 23:33:25 +0100
commit     082bdc7eb13615fab5671b105171abcb62483c7a (patch)
tree       f1f78e48b5364ba4a9224c1cf7eb86af549d05ee /sql
parent     1de7e5c09f915333cd21fc3786259a3967ddf69a (diff)
parent     e308ad708cdec9cebb8240b4fa83a685bbc05139 (diff)

Merge whalegate.ndb.mysql.com:/home/tomas/mysql-5.0
into whalegate.ndb.mysql.com:/home/tomas/mysql-5.0-ndb-merge

ndb/include/ndbapi/Ndb.hpp:
  Auto merged
Diffstat (limited to 'sql')
-rw-r--r--  sql/ha_ndbcluster.cc       | 46
-rw-r--r--  sql/ha_ndbcluster.h        |  9
-rw-r--r--  sql/ha_ndbcluster_cond.cc  | 20
3 files changed, 64 insertions, 11 deletions
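
The substance of this merge, as it lands in sql/, is a change to duplicate-key
peeking: peek_indexed_rows() loses its boolean check_pk flag and instead takes
a three-valued NDB_WRITE_OP, so an UPDATE that does not touch the primary key
can skip both the primary-key peek and any unique index whose columns are not
all in the write set. A minimal standalone sketch of that dispatch, using only
names that appear in the hunks below (choose_write_op is a hypothetical helper
added for illustration; the patch inlines the equivalent ternary at the
update_row() call site):

#include <cstdio>

typedef enum ndb_write_op {
  NDB_INSERT    = 0,  // write_row(): peek PK and all unique indexes
  NDB_UPDATE    = 1,  // update_row(), PK untouched: skip the PK peek
  NDB_PK_UPDATE = 2   // update_row() that rewrites the PK: peek it too
} NDB_WRITE_OP;

// Hypothetical helper mirroring the write_row()/update_row() call sites.
static NDB_WRITE_OP choose_write_op(bool inserting, bool pk_update)
{
  if (inserting)
    return NDB_INSERT;
  return pk_update ? NDB_PK_UPDATE : NDB_UPDATE;
}

int main()
{
  std::printf("%d %d %d\n",
              choose_write_op(true, false),    // 0 (NDB_INSERT)
              choose_write_op(false, false),   // 1 (NDB_UPDATE)
              choose_write_op(false, true));   // 2 (NDB_PK_UPDATE)
  return 0;
}
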
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 05e35bd0157..d2c580a61de 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -1356,6 +1356,30 @@ int ha_ndbcluster::set_primary_key_from_record(NdbOperation *op, const byte *rec
DBUG_RETURN(0);
}
+bool ha_ndbcluster::check_index_fields_in_write_set(uint keyno)
+{
+ KEY* key_info= table->key_info + keyno;
+ KEY_PART_INFO* key_part= key_info->key_part;
+ KEY_PART_INFO* end= key_part+key_info->key_parts;
+ uint i;
+ DBUG_ENTER("check_index_fields_in_write_set");
+
+ if (m_retrieve_all_fields)
+ {
+ DBUG_RETURN(true);
+ }
+ for (i= 0; key_part != end; key_part++, i++)
+ {
+ Field* field= key_part->field;
+ if (field->query_id != current_thd->query_id)
+ {
+ DBUG_RETURN(false);
+ }
+ }
+
+ DBUG_RETURN(true);
+}
+
int ha_ndbcluster::set_index_key_from_record(NdbOperation *op, const byte *record, uint keyno)
{
KEY* key_info= table->key_info + keyno;
@@ -1643,7 +1667,8 @@ check_null_in_record(const KEY* key_info, const byte *record)
* primary key or unique index values
*/
-int ha_ndbcluster::peek_indexed_rows(const byte *record, bool check_pk)
+int ha_ndbcluster::peek_indexed_rows(const byte *record,
+ NDB_WRITE_OP write_op)
{
NdbTransaction *trans= m_active_trans;
NdbOperation *op;
@@ -1656,7 +1681,7 @@ int ha_ndbcluster::peek_indexed_rows(const byte *record, bool check_pk)
(NdbOperation::LockMode)get_ndb_lock_type(m_lock.type);
first= NULL;
- if (check_pk && table->s->primary_key != MAX_KEY)
+ if (write_op != NDB_UPDATE && table->s->primary_key != MAX_KEY)
{
/*
* Fetch any row with colliding primary key
@@ -1687,9 +1712,15 @@ int ha_ndbcluster::peek_indexed_rows(const byte *record, bool check_pk)
*/
if (check_null_in_record(key_info, record))
{
- DBUG_PRINT("info", ("skipping check for key with NULL"));
+ DBUG_PRINT("info", ("skipping check for key with NULL"));
continue;
}
+ if (write_op != NDB_INSERT && !check_index_fields_in_write_set(i))
+ {
+ DBUG_PRINT("info", ("skipping check for key %u not in write_set", i));
+ continue;
+ }
+
NdbIndexOperation *iop;
NDBINDEX *unique_index = (NDBINDEX *) m_index[i].unique_index;
key_part= key_info->key_part;
@@ -2268,7 +2299,7 @@ int ha_ndbcluster::write_row(byte *record)
start_bulk_insert will set parameters to ensure that each
write_row is committed individually
*/
- int peek_res= peek_indexed_rows(record, true);
+ int peek_res= peek_indexed_rows(record, NDB_INSERT);
if (!peek_res)
{
@@ -2302,7 +2333,7 @@ int ha_ndbcluster::write_row(byte *record)
auto_value, 1) == -1)
{
if (--retries &&
- ndb->getNdbError().status == NdbError::TemporaryError);
+ ndb->getNdbError().status == NdbError::TemporaryError)
{
my_sleep(retry_sleep);
continue;
@@ -2456,7 +2487,8 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
if (m_ignore_dup_key && (thd->lex->sql_command == SQLCOM_UPDATE ||
thd->lex->sql_command == SQLCOM_UPDATE_MULTI))
{
- int peek_res= peek_indexed_rows(new_data, pk_update);
+ NDB_WRITE_OP write_op= (pk_update) ? NDB_PK_UPDATE : NDB_UPDATE;
+ int peek_res= peek_indexed_rows(new_data, write_op);
if (!peek_res)
{
@@ -4862,7 +4894,7 @@ ulonglong ha_ndbcluster::get_auto_increment()
auto_value, cache_size, step, start))
{
if (--retries &&
- ndb->getNdbError().status == NdbError::TemporaryError);
+ ndb->getNdbError().status == NdbError::TemporaryError)
{
my_sleep(retry_sleep);
continue;
diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h
index 81cbdcd8fea..324969ad374 100644
--- a/sql/ha_ndbcluster.h
+++ b/sql/ha_ndbcluster.h
@@ -59,6 +59,12 @@ typedef struct ndb_index_data {
bool null_in_unique_index;
} NDB_INDEX_DATA;
+typedef enum ndb_write_op {
+ NDB_INSERT = 0,
+ NDB_UPDATE = 1,
+ NDB_PK_UPDATE = 2
+} NDB_WRITE_OP;
+
typedef struct st_ndbcluster_share {
THR_LOCK lock;
pthread_mutex_t mutex;
@@ -251,7 +257,7 @@ private:
const NdbOperation *first,
const NdbOperation *last,
uint errcode);
- int peek_indexed_rows(const byte *record, bool check_pk);
+ int peek_indexed_rows(const byte *record, NDB_WRITE_OP write_op);
int unique_index_read(const byte *key, uint key_len,
byte *buf);
int ordered_index_scan(const key_range *start_key,
@@ -286,6 +292,7 @@ private:
int get_ndb_blobs_value(NdbBlob *last_ndb_blob, my_ptrdiff_t ptrdiff);
int set_primary_key(NdbOperation *op, const byte *key);
int set_primary_key_from_record(NdbOperation *op, const byte *record);
+ bool check_index_fields_in_write_set(uint keyno);
int set_index_key_from_record(NdbOperation *op, const byte *record,
uint keyno);
int set_bounds(NdbIndexScanOperation*, const key_range *keys[2], uint= 0);
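
The header now declares the NDB_WRITE_OP enum and the
check_index_fields_in_write_set() helper used by peek_indexed_rows(). In the
5.0 codebase there is no per-handler write-set bitmap, so "column is in the
write set" is approximated by field->query_id == current_thd->query_id, as the
ha_ndbcluster.cc hunk above shows. A detached sketch of that same loop, with
hypothetical stand-in structs in place of Field and KEY_PART_INFO:

#include <cstdio>

struct Field   { unsigned long query_id; };  // stand-in for MySQL's Field
struct KeyPart { Field *field; };            // stand-in for KEY_PART_INFO

// True only if every column of the key was assigned by the statement
// identified by query_id; false means the unique-index peek is skipped.
static bool all_key_fields_in_write_set(const KeyPart *part, unsigned parts,
                                        unsigned long query_id)
{
  for (unsigned i = 0; i < parts; i++)
    if (part[i].field->query_id != query_id)
      return false;
  return true;
}

int main()
{
  Field a = {42}, b = {41};
  KeyPart key[] = { {&a}, {&b} };
  std::printf("%d\n", all_key_fields_in_write_set(key, 2, 42));  // 0: b stale
  b.query_id = 42;
  std::printf("%d\n", all_key_fields_in_write_set(key, 2, 42));  // 1: all set
  return 0;
}
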
diff --git a/sql/ha_ndbcluster_cond.cc b/sql/ha_ndbcluster_cond.cc
index ea3f8a7683a..c7b185a92f0 100644
--- a/sql/ha_ndbcluster_cond.cc
+++ b/sql/ha_ndbcluster_cond.cc
@@ -1338,9 +1338,23 @@ ha_ndbcluster_cond::generate_scan_filter(NdbScanOperation *op)
if (m_cond_stack)
{
- NdbScanFilter filter(op);
+ NdbScanFilter filter(op, false); // don't abort on too large
- DBUG_RETURN(generate_scan_filter_from_cond(filter));
+ int ret=generate_scan_filter_from_cond(filter);
+ if (ret != 0)
+ {
+ const NdbError& err=filter.getNdbError();
+ if (err.code == NdbScanFilter::FilterTooLarge)
+ {
+ // err.message has static storage
+ DBUG_PRINT("info", ("%s", err.message));
+ push_warning(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ err.code, err.message);
+ ret=0;
+ }
+ }
+ if (ret != 0)
+ DBUG_RETURN(ret);
}
else
{
@@ -1391,7 +1405,7 @@ int ha_ndbcluster_cond::generate_scan_filter_from_key(NdbScanOperation *op,
{
KEY_PART_INFO* key_part= key_info->key_part;
KEY_PART_INFO* end= key_part+key_info->key_parts;
- NdbScanFilter filter(op);
+ NdbScanFilter filter(op, true); // abort on too large
int res;
DBUG_ENTER("generate_scan_filter_from_key");
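
The ha_ndbcluster_cond.cc change splits NdbScanFilter's behavior by caller:
generate_scan_filter() now builds the filter with abort-on-too-large disabled
and, on NdbScanFilter::FilterTooLarge, downgrades the error to a push_warning
and returns success, so the scan proceeds without the over-large pushed-down
condition; generate_scan_filter_from_key() keeps the aborting constructor,
since a filter derived from the key must not be silently dropped. A sketch of
that degrade-to-warning pattern with hypothetical stand-ins (the error code
value is invented; only the control flow matches the hunk above):

#include <cstdio>

enum { FILTER_TOO_LARGE = 1 };                   // hypothetical code value
struct Error { int code; const char *message; };

// Stand-in for generate_scan_filter_from_cond() plus NdbScanFilter.
static Error build_filter(bool too_large)
{
  Error e = { too_large ? FILTER_TOO_LARGE : 0,
              too_large ? "Filter too large" : "" };
  return e;
}

static int generate_scan_filter_sketch(bool too_large)
{
  Error err = build_filter(too_large);
  if (err.code == FILTER_TOO_LARGE)
  {
    std::fprintf(stderr, "warning: %s\n", err.message);
    return 0;  // not fatal: the scan runs without the pushed filter
  }
  return err.code;
}

int main()
{
  std::printf("%d\n", generate_scan_filter_sketch(false));  // 0
  std::printf("%d\n", generate_scan_filter_sketch(true));   // 0, plus warning
  return 0;
}
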