| author | jani@ua141d10.elisa.omakaista.fi <> | 2007-04-12 12:50:02 +0300 |
|---|---|---|
| committer | jani@ua141d10.elisa.omakaista.fi <> | 2007-04-12 12:50:02 +0300 |
| commit | 335153121bdb0152eeaf2d2adeb326fff51e6052 (patch) | |
| tree | 30b412b9ebad3ddaa08bcf7eb478c89eeebc2ed2 | /sql/ha_ndbcluster.cc |
| parent | 102f58fe34eb0120e05927c56d52e9a57e3b9e3a (diff) | |
| parent | bd461b01f05aa08062b75bffab38ff3299d1e08c (diff) | |
| download | mariadb-git-335153121bdb0152eeaf2d2adeb326fff51e6052.tar.gz | |
Merge jamppa@bk-internal.mysql.com:/home/bk/mysql-5.0
into ua141d10.elisa.omakaista.fi:/home/my/bk/mysql-5.0-marvel
Diffstat (limited to 'sql/ha_ndbcluster.cc')
| -rw-r--r-- | sql/ha_ndbcluster.cc | 73 |
|---|---|---|

1 file changed, 53 insertions(+), 20 deletions(-)
```diff
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index b02be09251f..33744b49cef 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -203,6 +203,8 @@ static const err_code_mapping err_map[]=
   { 284, HA_ERR_TABLE_DEF_CHANGED, 0 },
 
+  {4009, HA_ERR_NO_CONNECTION, 1 },
+
   { 0, 1, 0 },
 
   { -1, -1, 1 }
@@ -2521,8 +2523,13 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
       ERR_RETURN(op->getNdbError());
   }
 
-  // Execute update operation
-  if (!cursor && execute_no_commit(this,trans,false) != 0) {
+  /*
+    Execute update operation if we are not doing a scan for update
+    and there exist UPDATE AFTER triggers
+  */
+
+  if ((!cursor || m_update_cannot_batch) &&
+      execute_no_commit(this,trans,false) != 0) {
     no_uncommitted_rows_execute_failure();
     DBUG_RETURN(ndb_err(trans));
   }
@@ -2563,7 +2570,7 @@ int ha_ndbcluster::delete_row(const byte *record)
 
     no_uncommitted_rows_update(-1);
 
-    if (!m_primary_key_update)
+    if (!(m_primary_key_update || m_delete_cannot_batch))
       // If deleting from cursor, NoCommit will be handled in next_result
       DBUG_RETURN(0);
   }
@@ -3404,6 +3411,16 @@ int ha_ndbcluster::extra(enum ha_extra_function operation)
     DBUG_PRINT("info", ("Turning OFF use of write instead of insert"));
     m_use_write= FALSE;
     break;
+  case HA_EXTRA_DELETE_CANNOT_BATCH:
+    DBUG_PRINT("info", ("HA_EXTRA_DELETE_CANNOT_BATCH"));
+    m_delete_cannot_batch= TRUE;
+    break;
+  case HA_EXTRA_UPDATE_CANNOT_BATCH:
+    DBUG_PRINT("info", ("HA_EXTRA_UPDATE_CANNOT_BATCH"));
+    m_update_cannot_batch= TRUE;
+    break;
+  default:
+    break;
   }
 
   DBUG_RETURN(0);
@@ -3420,6 +3437,8 @@ int ha_ndbcluster::reset()
   m_retrieve_primary_key= FALSE;
   m_ignore_dup_key= FALSE;
   m_use_write= FALSE;
+  m_delete_cannot_batch= FALSE;
+  m_update_cannot_batch= FALSE;
   DBUG_RETURN(0);
 }
@@ -4785,6 +4804,8 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg):
   m_bulk_insert_rows((ha_rows) 1024),
   m_rows_changed((ha_rows) 0),
   m_bulk_insert_not_flushed(FALSE),
+  m_delete_cannot_batch(FALSE),
+  m_update_cannot_batch(FALSE),
   m_ops_pending(0),
   m_skip_auto_increment(TRUE),
   m_blobs_pending(0),
@@ -5067,14 +5088,11 @@ int ndbcluster_table_exists_in_engine(THD* thd, const char *db, const char *name
   dict->invalidateTable(name);
   if (!(tab= dict->getTable(name)))
   {
-    const NdbError err= dict->getNdbError();
-    if (err.code == 709)
-      DBUG_RETURN(0);
-    ERR_RETURN(err);
+    ERR_RETURN(dict->getNdbError());
   }
 
   DBUG_PRINT("info", ("Found table %s", tab->getName()));
-  DBUG_RETURN(1);
+  DBUG_RETURN(HA_ERR_TABLE_EXIST);
 }
@@ -5259,7 +5277,7 @@ int ndbcluster_find_files(THD *thd,const char *db,const char *path,
       DBUG_PRINT("info", ("%s existed on disk", name));
       // The .ndb file exists on disk, but it's not in list of tables in ndb
       // Verify that handler agrees table is gone.
-      if (ndbcluster_table_exists_in_engine(thd, db, file_name) == 0)
+      if (ndbcluster_table_exists_in_engine(thd, db, file_name) == HA_ERR_NO_SUCH_TABLE)
       {
         DBUG_PRINT("info", ("NDB says %s does not exists", file_name));
         it.remove();
@@ -6696,7 +6714,8 @@ pthread_handler_t ndb_util_thread_func(void *arg __attribute__((unused)))
     DBUG_RETURN(NULL);
   }
 
-  List<NDB_SHARE> util_open_tables;
+  uint share_list_size= 0;
+  NDB_SHARE **share_list= NULL;
   set_timespec(abstime, 0);
   for (;;)
   {
@@ -6726,7 +6745,22 @@ pthread_handler_t ndb_util_thread_func(void *arg __attribute__((unused)))
     /* Lock mutex and fill list with pointers to all open tables */
     NDB_SHARE *share;
     pthread_mutex_lock(&ndbcluster_mutex);
-    for (uint i= 0; i < ndbcluster_open_tables.records; i++)
+    uint i, record_count= ndbcluster_open_tables.records;
+    if (share_list_size < record_count)
+    {
+      NDB_SHARE ** new_share_list= new NDB_SHARE * [record_count];
+      if (!new_share_list)
+      {
+        sql_print_warning("ndb util thread: malloc failure, "
+                          "query cache not maintained properly");
+        pthread_mutex_unlock(&ndbcluster_mutex);
+        goto next;                               // At least do not crash
+      }
+      delete [] share_list;
+      share_list_size= record_count;
+      share_list= new_share_list;
+    }
+    for (i= 0; i < record_count; i++)
     {
       share= (NDB_SHARE *)hash_element(&ndbcluster_open_tables, i);
       share->use_count++; /* Make sure the table can't be closed */
@@ -6735,14 +6769,14 @@ pthread_handler_t ndb_util_thread_func(void *arg __attribute__((unused)))
                           i, share->table_name, share->use_count));
 
       /* Store pointer to table */
-      util_open_tables.push_back(share);
+      share_list[i]= share;
     }
     pthread_mutex_unlock(&ndbcluster_mutex);
 
-    /* Iterate through the open files list */
-    List_iterator_fast<NDB_SHARE> it(util_open_tables);
-    while ((share= it++))
+    /* Iterate through the open files list */
+    for (i= 0; i < record_count; i++)
     {
+      share= share_list[i];
       /* Split tab- and dbname */
       char buf[FN_REFLEN];
       char *tabname, *db;
@@ -6791,10 +6825,7 @@ pthread_handler_t ndb_util_thread_func(void *arg __attribute__((unused)))
       /* Decrease the use count and possibly free share */
       free_share(share);
     }
-
-    /* Clear the list of open tables */
-    util_open_tables.empty();
-
+next:
     /* Calculate new time to wake up */
     int secs= 0;
     int msecs= ndb_cache_check_time;
@@ -6817,6 +6848,8 @@ pthread_handler_t ndb_util_thread_func(void *arg __attribute__((unused)))
     }
   }
 
+  if (share_list)
+    delete [] share_list;
   thd->cleanup();
   delete thd;
   delete ndb;
@@ -7114,7 +7147,7 @@ void ndb_serialize_cond(const Item *item, void *arg)
         Check that the field is part of the table of the handler
         instance and that we expect a field with of this result type.
       */
-      if (context->table == field->table)
+      if (context->table->s == field->table->s)
       {
         const NDBTAB *tab= (const NDBTAB *) context->ndb_table;
         DBUG_PRINT("info", ("FIELD_ITEM"));
```
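The functional core of this merge is the pair of `m_update_cannot_batch` / `m_delete_cannot_batch` flags: through `HA_EXTRA_UPDATE_CANNOT_BATCH` and `HA_EXTRA_DELETE_CANNOT_BATCH` the SQL layer can now tell the NDB handler that row changes must be executed immediately rather than deferred into a batch (e.g. so that AFTER triggers observe each change). Below is a minimal standalone sketch of that pattern; the class and enum names are simplified stand-ins, not the real handler API.

```cpp
#include <cstdio>

// Hypothetical, simplified handler sketch; it only illustrates how the
// new m_*_cannot_batch flags change batching behaviour.
enum extra_function {
  EXTRA_DELETE_CANNOT_BATCH,
  EXTRA_UPDATE_CANNOT_BATCH
};

class handler_sketch {
  bool m_update_cannot_batch;
  bool m_delete_cannot_batch;

public:
  handler_sketch() : m_update_cannot_batch(false), m_delete_cannot_batch(false) {}

  // Mirrors ha_ndbcluster::extra(): the SQL layer signals, per statement,
  // that updates/deletes may not be batched.
  void extra(extra_function op) {
    switch (op) {
    case EXTRA_DELETE_CANNOT_BATCH: m_delete_cannot_batch = true; break;
    case EXTRA_UPDATE_CANNOT_BATCH: m_update_cannot_batch = true; break;
    }
  }

  // Mirrors ha_ndbcluster::reset(): the flags are per statement and are
  // cleared before the next one, so batching stays the default.
  void reset() {
    m_update_cannot_batch = m_delete_cannot_batch = false;
  }

  // Mirrors the update_row() change: a cursor (scan) update is normally
  // deferred into a batch, unless batching was forbidden for this statement.
  int update_row(bool on_cursor) {
    if (!on_cursor || m_update_cannot_batch)
      return execute_now("update");
    std::puts("update deferred to batch");
    return 0;
  }

private:
  int execute_now(const char *what) {
    std::printf("%s executed immediately\n", what);
    return 0;
  }
};

int main() {
  handler_sketch h;
  h.update_row(true);                 // deferred: part of a scan, batching OK
  h.extra(EXTRA_UPDATE_CANNOT_BATCH); // e.g. UPDATE with an AFTER UPDATE trigger
  h.update_row(true);                 // now executed immediately
  h.reset();
}
```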
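The `ndb_util_thread_func()` change replaces a `List<NDB_SHARE>` that was rebuilt and emptied on every pass with a grow-only pointer array that is reused across passes, reallocated only when the number of open tables exceeds its capacity, and skipped gracefully (rather than crashing) when allocation fails. A self-contained sketch of that allocation pattern follows; the names are illustrative, not the real ones.

```cpp
#include <cstddef>
#include <cstdio>
#include <new>

// Hypothetical stand-in for NDB_SHARE; only the pattern matters here.
struct share_t { int id; };

int main() {
  // Grow-only scratch buffer reused across iterations, as in the diff:
  // capacity only ever increases, so steady-state passes allocate nothing.
  size_t share_list_size = 0;
  share_t **share_list = nullptr;

  for (int pass = 0; pass < 3; pass++) {
    size_t record_count = 4 * (pass + 1);  // pretend the open-table count grows

    if (share_list_size < record_count) {
      share_t **new_share_list = new (std::nothrow) share_t *[record_count];
      if (!new_share_list) {
        std::fprintf(stderr, "allocation failure, skipping this pass\n");
        continue;                           // at least do not crash
      }
      delete[] share_list;                  // old, smaller buffer no longer needed
      share_list = new_share_list;
      share_list_size = record_count;
    }
    std::printf("pass %d: capacity %zu\n", pass, share_list_size);
  }
  delete[] share_list;                      // freed once, on thread exit
}
```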