author     magnus@neptunus.(none) <>   2004-04-30 13:42:47 +0200
committer  magnus@neptunus.(none) <>   2004-04-30 13:42:47 +0200
commit     970624c2070913579308fd7434c7098facae7e65 (patch)
tree       96bc6aa795e9d00fddc66e0c1f0974c0c58fdcc7 /sql/ha_ndbcluster.cc
parent     c3df24362bab7604f3b5e0c77b9f2fa7f861fbb9 (diff)
parent     eea0069e6afe02b4a292f0df2cde883e0a79c8f5 (diff)
Merge neptunus.(none):/home/magnus/mysql-4.1
into neptunus.(none):/home/magnus/mysql-4.1-insert
Diffstat (limited to 'sql/ha_ndbcluster.cc')
-rw-r--r--  sql/ha_ndbcluster.cc  72
1 file changed, 65 insertions(+), 7 deletions(-)
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 4b4637e0110..7a54190b9cb 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -1044,8 +1044,16 @@ int ha_ndbcluster::write_row(byte *record)
to NoCommit the transaction between each row.
Find out how this is detected!
*/
- if (trans->execute(NoCommit) != 0)
- DBUG_RETURN(ndb_err(trans));
+ rows_inserted++;
+ if ((rows_inserted % bulk_insert_rows) == 0)
+ {
+ // Send rows to NDB
+ DBUG_PRINT("info", ("Sending inserts to NDB, "\
+ "rows_inserted:%d, bulk_insert_rows: %d",
+ rows_inserted, bulk_insert_rows));
+ if (trans->execute(NoCommit) != 0)
+ DBUG_RETURN(ndb_err(trans));
+ }
DBUG_RETURN(0);
}
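
To illustrate the new batching behaviour in write_row: rows are counted as they are defined, and the transaction is only executed (i.e. the rows are sent to NDB in one roundtrip) once a full batch of bulk_insert_rows rows has accumulated. Below is a minimal standalone sketch of that pattern; BATCH_SIZE, flush_batch() and the batch size of 3 are made-up stand-ins for bulk_insert_rows and trans->execute(NoCommit), not NDB API code.

// Standalone sketch of the flush-every-N-rows pattern introduced above.
// BATCH_SIZE and flush_batch() are illustrative stand-ins only.
#include <cstdio>

static const unsigned long BATCH_SIZE= 3;

static void flush_batch(unsigned long rows_inserted)
{
  printf("flush after %lu rows\n", rows_inserted);
}

int main()
{
  unsigned long rows_inserted= 0;
  for (int row= 0; row < 10; row++)
  {
    rows_inserted++;                        // one write_row() call
    if ((rows_inserted % BATCH_SIZE) == 0)  // batch is full, send it in one roundtrip
      flush_batch(rows_inserted);
  }
  // With 10 rows and a batch size of 3, flushes happen after rows 3, 6 and 9;
  // the remaining row stays buffered until the transaction is executed later.
  return 0;
}
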
@@ -1794,6 +1802,53 @@ int ha_ndbcluster::extra(enum ha_extra_function operation)
DBUG_RETURN(0);
}
+/*
+  Start of an insert. Remember the number of rows to be inserted; it will
+  be used in write_row and get_auto_increment to send an optimal number
+  of rows in each roundtrip to the server.
+
+  SYNOPSIS
+    rows     number of rows to insert, 0 if unknown
+
+*/
+
+void ha_ndbcluster::start_bulk_insert(ha_rows rows)
+{
+ int bytes, batch;
+ const NDBTAB *tab= (NDBTAB *) m_table;
+
+ DBUG_ENTER("start_bulk_insert");
+ DBUG_PRINT("enter", ("rows: %d", rows));
+
+ rows_inserted= 0;
+ rows_to_insert= rows;
+
+  /*
+    Calculate how many rows should be inserted per roundtrip to NDB.
+    This is done to minimize the number of roundtrips. However,
+    performance will degrade if too many bytes are sent in one batch,
+    so the batch size is capped by this calculation.
+  */
+ bytes= 12 + tab->getRowSizeInBytes() + 4 * tab->getNoOfColumns();
+  batch= (1024*256); // Aim for roughly 256 KB per roundtrip (1024 rows of 256 bytes)
+  batch= batch/bytes; // Number of rows of this table that fit in ~256 KB
+  batch= batch == 0 ? 1 : batch; // Always send at least one row
+ DBUG_PRINT("info", ("batch: %d, bytes: %d", batch, bytes));
+ bulk_insert_rows= batch;
+
+ DBUG_VOID_RETURN;
+}
+
+/*
+ End of an insert
+ */
+int ha_ndbcluster::end_bulk_insert()
+{
+ DBUG_ENTER("end_bulk_insert");
+ DBUG_RETURN(0);
+}
+
int ha_ndbcluster::extra_opt(enum ha_extra_function operation, ulong cache_size)
{
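
The sizing heuristic in start_bulk_insert above estimates a per-row cost of 12 bytes plus the row size plus 4 bytes per column, and then fits as many rows as possible into roughly 256 KB per roundtrip. Here is a worked example as a standalone sketch, assuming a hypothetical table with a 200-byte row size and 8 columns (these numbers are illustrative, not taken from any real table definition).

// Worked example of the batch-size calculation in start_bulk_insert().
// row_size and columns are hypothetical inputs standing in for
// tab->getRowSizeInBytes() and tab->getNoOfColumns().
#include <cstdio>

int main()
{
  int row_size= 200;                       // assumed row size in bytes
  int columns= 8;                          // assumed number of columns
  int bytes= 12 + row_size + 4 * columns;  // estimated cost per row: 244
  int batch= (1024 * 256) / bytes;         // rows per ~256 KB roundtrip: 1074
  if (batch == 0)
    batch= 1;                              // always send at least one row
  printf("bytes per row: %d, rows per batch: %d\n", bytes, batch);
  return 0;
}
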
@@ -2469,10 +2524,10 @@ int ndbcluster_drop_database(const char *path)
longlong ha_ndbcluster::get_auto_increment()
-{
- // NOTE If number of values to be inserted is known
- // the autoincrement cache could be used here
- Uint64 auto_value= m_ndb->getAutoIncrementValue(m_tabname);
+{
+  int cache_size= rows_to_insert ? rows_to_insert : 32;
+ Uint64 auto_value=
+ m_ndb->getAutoIncrementValue(m_tabname, cache_size);
return (longlong)auto_value;
}
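
The cache_size choice above means a multi-row INSERT with a known row count fetches all of its auto-increment values from NDB in a single roundtrip, while statements with an unknown row count fall back to a fixed cache of 32 values. A small sketch of just that choice; pick_autoincrement_cache_size is a hypothetical helper used only for illustration, not part of the handler.

// Hedged sketch of the cache-size choice in get_auto_increment().
#include <cstdio>

static int pick_autoincrement_cache_size(unsigned long rows_to_insert)
{
  // Known multi-row insert: prefetch that many values in one roundtrip.
  // Unknown row count (0): fall back to a fixed cache of 32 values.
  return rows_to_insert ? (int) rows_to_insert : 32;
}

int main()
{
  printf("%d\n", pick_autoincrement_cache_size(0));     // prints 32
  printf("%d\n", pick_autoincrement_cache_size(1000));  // prints 1000
  return 0;
}
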
@@ -2495,7 +2550,10 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg):
HA_NO_BLOBS |
HA_DROP_BEFORE_CREATE |
HA_NOT_READ_AFTER_KEY),
- m_use_write(false)
+ m_use_write(false),
+ rows_to_insert(0),
+ rows_inserted(0),
+ bulk_insert_rows(1024)
{
int i;