-rw-r--r--  sql/ha_ndbcluster.cc | 72
-rw-r--r--  sql/ha_ndbcluster.h  |  5
2 files changed, 70 insertions(+), 7 deletions(-)
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 4b4637e0110..7a54190b9cb 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -1044,8 +1044,16 @@ int ha_ndbcluster::write_row(byte *record)
     to NoCommit the transaction between each row.
     Find out how this is detected!
   */
-  if (trans->execute(NoCommit) != 0)
-    DBUG_RETURN(ndb_err(trans));
+  rows_inserted++;
+  if ((rows_inserted % bulk_insert_rows) == 0)
+  {
+    // Send rows to NDB
+    DBUG_PRINT("info", ("Sending inserts to NDB, "\
+                        "rows_inserted:%d, bulk_insert_rows: %d",
+                        rows_inserted, bulk_insert_rows));
+    if (trans->execute(NoCommit) != 0)
+      DBUG_RETURN(ndb_err(trans));
+  }
   DBUG_RETURN(0);
 }
 
@@ -1794,6 +1802,53 @@ int ha_ndbcluster::extra(enum ha_extra_function operation)
   DBUG_RETURN(0);
 }
 
+/*
+  Start of an insert, remember number of rows to be inserted, it will
+  be used in write_row and get_autoincrement to send an optimal number
+  of rows in each roundtrip to the server
+
+  SYNOPSIS
+    rows     number of rows to insert, 0 if unknown
+
+*/
+
+void ha_ndbcluster::start_bulk_insert(ha_rows rows)
+{
+  int bytes, batch;
+  const NDBTAB *tab= (NDBTAB *) m_table;
+
+  DBUG_ENTER("start_bulk_insert");
+  DBUG_PRINT("enter", ("rows: %d", rows));
+
+  rows_inserted= 0;
+  rows_to_insert= rows;
+
+  /*
+    Calculate how many rows that should be inserted
+    per roundtrip to NDB. This is done in order to minimize the
+    number of roundtrips as much as possible. However performance will
+    degrade if too many bytes are inserted, thus it's limited by this
+    calculation.
+  */
+  bytes= 12 + tab->getRowSizeInBytes() + 4 * tab->getNoOfColumns();
+  batch= (1024*256); // 1024 rows, with size 256
+  batch= batch/bytes; //
+  batch= batch == 0 ? 1 : batch;
+  DBUG_PRINT("info", ("batch: %d, bytes: %d", batch, bytes));
+  bulk_insert_rows= batch;
+
+  DBUG_VOID_RETURN;
+}
+
+/*
+  End of an insert
+ */
+int ha_ndbcluster::end_bulk_insert()
+{
+  DBUG_ENTER("end_bulk_insert");
+  DBUG_RETURN(0);
+}
+
 int ha_ndbcluster::extra_opt(enum ha_extra_function operation,
                              ulong cache_size)
 {
@@ -2469,10 +2524,10 @@ int ndbcluster_drop_database(const char *path)
 }
 
 longlong ha_ndbcluster::get_auto_increment()
-{
-  // NOTE If number of values to be inserted is known
-  // the autoincrement cache could be used here
-  Uint64 auto_value= m_ndb->getAutoIncrementValue(m_tabname);
+{
+  int cache_size = rows_to_insert ? rows_to_insert : 32;
+  Uint64 auto_value=
+    m_ndb->getAutoIncrementValue(m_tabname, cache_size);
   return (longlong)auto_value;
 }
 
@@ -2495,7 +2550,10 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg):
                 HA_NO_BLOBS |
                 HA_DROP_BEFORE_CREATE |
                 HA_NOT_READ_AFTER_KEY),
-  m_use_write(false)
+  m_use_write(false),
+  rows_to_insert(0),
+  rows_inserted(0),
+  bulk_insert_rows(1024)
 {
   int i;
 
diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h
index e524cd8e4b3..f9109244492 100644
--- a/sql/ha_ndbcluster.h
+++ b/sql/ha_ndbcluster.h
@@ -132,6 +132,8 @@ class ha_ndbcluster: public handler
                        const byte *end_key,uint end_key_len,
                        enum ha_rkey_function end_search_flag);
 
+  void start_bulk_insert(ha_rows rows);
+  int end_bulk_insert();
 
   static Ndb* seize_ndb();
   static void release_ndb(Ndb* ndb);
@@ -206,6 +208,9 @@ class ha_ndbcluster: public handler
   const char* m_unique_index_name[MAX_KEY];
   NdbRecAttr *m_value[NDB_MAX_ATTRIBUTES_IN_TABLE];
   bool m_use_write;
+  ha_rows rows_to_insert;
+  ha_rows rows_inserted;
+  ha_rows bulk_insert_rows;
 };
 
 bool ndbcluster_init(void);
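
The key change is in write_row(): instead of paying one execute(NoCommit) roundtrip per row, the handler now counts rows and flushes to NDB once per bulk_insert_rows. Below is a minimal, self-contained sketch of that modulo-flush pattern; MockTrans and the row loop are invented stand-ins for illustration, not code from the patch:

#include <cstdio>

// Stand-in for the NDB transaction object; execute() represents one
// network roundtrip (trans->execute(NoCommit) in the patch).
struct MockTrans
{
  int execute()
  {
    std::puts("execute(NoCommit)");
    return 0;  // 0 means success, as in the NDB API
  }
};

int main()
{
  MockTrans trans;
  unsigned rows_inserted= 0;
  const unsigned bulk_insert_rows= 3;    // tiny batch, for demonstration

  for (unsigned row= 0; row < 7; row++)  // "insert" seven rows
  {
    rows_inserted++;
    // Flush only when a full batch has accumulated (after rows 3 and 6)
    if ((rows_inserted % bulk_insert_rows) == 0)
      if (trans.execute() != 0)
        return 1;
  }
  // The seventh row is still buffered at this point; in the real handler
  // the eventual execute(Commit) sends any remainder, which is presumably
  // why end_bulk_insert() can stay a no-op placeholder.
  return 0;
}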
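
start_bulk_insert() picks the batch size so that each roundtrip carries roughly 256 KB, using an estimated per-row wire size of 12 bytes of overhead plus the row itself plus 4 bytes per column. A standalone version of that heuristic follows; the constants come from the patch, while the table dimensions (100-byte rows, 5 columns) are made-up example values:

#include <cstdio>

static int calc_bulk_insert_rows(int row_size_bytes, int columns)
{
  int bytes= 12 + row_size_bytes + 4 * columns;  // estimated bytes per row
  int batch= (1024 * 256) / bytes;               // cap one roundtrip at 256 KB
  return batch == 0 ? 1 : batch;                 // always send at least one row
}

int main()
{
  // bytes = 12 + 100 + 4*5 = 132;  batch = 262144 / 132 = 1985 rows
  std::printf("batch: %d rows\n", calc_bulk_insert_rows(100, 5));
  return 0;
}

Narrow rows are thus sent a couple of thousand at a time, very wide rows in much smaller batches, and the batch == 0 guard keeps a row wider than 256 KB from producing a zero-row batch.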
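
get_auto_increment() exploits the same row-count hint: when rows_to_insert is known it is passed to getAutoIncrementValue() as the autoincrement cache size (falling back to 32 when the count is unknown), so a multi-row INSERT can reserve its autoincrement values up front rather than fetching them one at a time.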