author     unknown <pekka@mysql.com>   2005-09-15 02:33:28 +0200
committer  unknown <pekka@mysql.com>   2005-09-15 02:33:28 +0200
commit     9a7ec9517c8b371d39ac65b74aa00f180f8c17d0 (patch)
tree       f35bfbf589887642a59e75b7c082269a709cd86e /storage
parent     e89acaa2d83962da66c1591fba3720a8e36d6e49 (diff)
download   mariadb-git-9a7ec9517c8b371d39ac65b74aa00f180f8c17d0.tar.gz
ndb - wl#2624 re-commit due to bk problem

mysql-test/r/ndb_basic.result: wl#2624 re-commit due to bk problem
mysql-test/r/ndb_blob.result: wl#2624 re-commit due to bk problem
mysql-test/r/ndb_charset.result: wl#2624 re-commit due to bk problem
mysql-test/r/ndb_condition_pushdown.result: wl#2624 re-commit due to bk problem
mysql-test/r/ndb_index_ordered.result: wl#2624 re-commit due to bk problem
mysql-test/t/ndb_index_ordered.test: wl#2624 re-commit due to bk problem
sql/ha_ndbcluster.cc: wl#2624 re-commit due to bk problem
sql/ha_ndbcluster.h: wl#2624 re-commit due to bk problem
sql/mysqld.cc: wl#2624 re-commit due to bk problem
sql/set_var.cc: wl#2624 re-commit due to bk problem
sql/sql_class.h: wl#2624 re-commit due to bk problem
storage/ndb/include/kernel/AttributeHeader.hpp: wl#2624 re-commit due to bk problem
storage/ndb/include/ndbapi/NdbDictionary.hpp: wl#2624 re-commit due to bk problem
storage/ndb/include/ndbapi/NdbIndexScanOperation.hpp: wl#2624 re-commit due to bk problem
storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp: wl#2624 re-commit due to bk problem
storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp: wl#2624 re-commit due to bk problem
storage/ndb/src/kernel/blocks/dbtux/Dbtux.hpp: wl#2624 re-commit due to bk problem
storage/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp: wl#2624 re-commit due to bk problem
storage/ndb/src/kernel/blocks/dbtux/DbtuxTree.cpp: wl#2624 re-commit due to bk problem
storage/ndb/src/kernel/blocks/dbtux/Makefile.am: wl#2624 re-commit due to bk problem
storage/ndb/src/ndbapi/Makefile.am: wl#2624 re-commit due to bk problem
storage/ndb/src/ndbapi/NdbDictionary.cpp: wl#2624 re-commit due to bk problem
storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp: wl#2624 re-commit due to bk problem
storage/ndb/src/ndbapi/NdbScanOperation.cpp: wl#2624 re-commit due to bk problem
Diffstat (limited to 'storage')
-rw-r--r--  storage/ndb/include/kernel/AttributeHeader.hpp        |   2
-rw-r--r--  storage/ndb/include/ndbapi/NdbDictionary.hpp          |   2
-rw-r--r--  storage/ndb/include/ndbapi/NdbIndexScanOperation.hpp  |   3
-rw-r--r--  storage/ndb/include/ndbapi/NdbIndexStat.hpp           | 141
-rw-r--r--  storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp     |  13
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp |  12
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtux/Dbtux.hpp         |  17
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp      |   4
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtux/DbtuxStat.cpp     | 160
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtux/DbtuxTree.cpp     |  91
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtux/Makefile.am       |   1
-rw-r--r--  storage/ndb/src/ndbapi/Makefile.am                    |   1
-rw-r--r--  storage/ndb/src/ndbapi/NdbDictionary.cpp              |   1
-rw-r--r--  storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp          |  11
-rw-r--r--  storage/ndb/src/ndbapi/NdbIndexStat.cpp               | 490
-rw-r--r--  storage/ndb/src/ndbapi/NdbScanOperation.cpp           |  25
16 files changed, 929 insertions, 45 deletions
diff --git a/storage/ndb/include/kernel/AttributeHeader.hpp b/storage/ndb/include/kernel/AttributeHeader.hpp
index 7d89219b8b2..21b13472c37 100644
--- a/storage/ndb/include/kernel/AttributeHeader.hpp
+++ b/storage/ndb/include/kernel/AttributeHeader.hpp
@@ -41,6 +41,8 @@ public:
STATIC_CONST( ROW_SIZE = 0xFFFA );
STATIC_CONST( FRAGMENT_MEMORY= 0xFFF9 );
+ STATIC_CONST( RECORDS_IN_RANGE = 0xFFF8 );
+
/** Initialize AttributeHeader at location aHeaderPtr */
static AttributeHeader& init(void* aHeaderPtr, Uint32 anAttributeId,
Uint32 aDataSize);
diff --git a/storage/ndb/include/ndbapi/NdbDictionary.hpp b/storage/ndb/include/ndbapi/NdbDictionary.hpp
index 870af671959..00fe709677f 100644
--- a/storage/ndb/include/ndbapi/NdbDictionary.hpp
+++ b/storage/ndb/include/ndbapi/NdbDictionary.hpp
@@ -456,6 +456,7 @@ public:
static const Column * COMMIT_COUNT;
static const Column * ROW_SIZE;
static const Column * RANGE_NO;
+ static const Column * RECORDS_IN_RANGE;
int getSizeInBytes() const;
#endif
@@ -929,6 +930,7 @@ public:
private:
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
friend class NdbIndexImpl;
+ friend class NdbIndexStat;
#endif
class NdbIndexImpl & m_impl;
Index(NdbIndexImpl&);
diff --git a/storage/ndb/include/ndbapi/NdbIndexScanOperation.hpp b/storage/ndb/include/ndbapi/NdbIndexScanOperation.hpp
index c231b927581..fd7e9f2d05c 100644
--- a/storage/ndb/include/ndbapi/NdbIndexScanOperation.hpp
+++ b/storage/ndb/include/ndbapi/NdbIndexScanOperation.hpp
@@ -30,6 +30,7 @@ class NdbIndexScanOperation : public NdbScanOperation {
friend class NdbResultSet;
friend class NdbOperation;
friend class NdbScanOperation;
+ friend class NdbIndexStat;
#endif
public:
@@ -149,12 +150,14 @@ public:
* Is current scan sorted descending
*/
bool getDescending() const { return m_descending; }
+
private:
NdbIndexScanOperation(Ndb* aNdb);
virtual ~NdbIndexScanOperation();
int setBound(const NdbColumnImpl*, int type, const void* aValue, Uint32 len);
int insertBOUNDS(Uint32 * data, Uint32 sz);
+ Uint32 getKeyFromSCANTABREQ(Uint32* data, Uint32 size);
virtual int equal_impl(const NdbColumnImpl*, const char*, Uint32);
virtual NdbRecAttr* getValue_impl(const NdbColumnImpl*, char*);
diff --git a/storage/ndb/include/ndbapi/NdbIndexStat.hpp b/storage/ndb/include/ndbapi/NdbIndexStat.hpp
new file mode 100644
index 00000000000..7666166b657
--- /dev/null
+++ b/storage/ndb/include/ndbapi/NdbIndexStat.hpp
@@ -0,0 +1,141 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef NdbIndexStat_H
+#define NdbIndexStat_H
+
+#include <ndb_global.h>
+#include <NdbDictionary.hpp>
+#include <NdbError.hpp>
+class NdbIndexImpl;
+class NdbIndexScanOperation;
+
+/*
+ * Statistics for an ordered index.
+ */
+class NdbIndexStat {
+public:
+ NdbIndexStat(const NdbDictionary::Index* index);
+ ~NdbIndexStat();
+ /*
+ * Allocate memory for cache. Argument is minimum number of stat
+ * entries and applies to lower and upper bounds separately. More
+ * entries may fit (keys have variable size). If not used, db is
+ * contacted always.
+ */
+ int alloc_cache(Uint32 entries);
+ /*
+ * Flags for records_in_range.
+ */
+ enum {
+ RR_UseDb = 1, // contact db
+ RR_NoUpdate = 2 // but do not update cache
+ };
+ /*
+ * Estimate how many index records need to be scanned. The scan
+ * operation must be prepared with lock mode LM_CommittedRead and must
+ * have the desired bounds set. The routine may use local cache or
+ * may contact db by executing the operation.
+ *
+ * If returned count is zero then db was contacted and the count is
+ * exact. Otherwise the count is approximate. If cache is used then
+ * caller must provide estimated number of table rows. It will be
+ * multiplied by a percentage obtained from the cache (result zero is
+ * returned as 1).
+ */
+ int records_in_range(NdbDictionary::Index* index,
+ NdbIndexScanOperation* op,
+ Uint64 table_rows,
+ Uint64* count,
+ int flags);
+ /*
+ * Get latest error.
+ */
+ const NdbError& getNdbError() const;
+
+private:
+ /*
+ * There are 2 areas: start keys and end keys. An area has pointers
+ * at beginning and entries at end. Pointers are sorted by key.
+ *
+ * A pointer contains entry offset and also entry timestamp. An entry
+ * contains the key and percentage of rows _not_ satisfying the bound
+ * i.e. less than start key or greater than end key.
+ *
+ * A key is an array of index key bounds. Each has type (0-4) in
+ * first word followed by data with AttributeHeader.
+ *
+ * Stat update comes as pair of start and end key and associated
+ * percentages. Stat query takes best match of start and end key from
+ * each area separately. Rows in range percentage is then computed by
+ * excluding the two i.e. as 100 - (start key pct + end key pct).
+ *
+ * TODO use more compact key format
+ */
+ friend struct Area;
+ struct Pointer {
+ Uint16 m_pos;
+ Uint16 m_seq;
+ };
+ struct Entry {
+ float m_pct;
+ Uint32 m_keylen;
+ };
+ STATIC_CONST( EntrySize = sizeof(Entry) >> 2 );
+ STATIC_CONST( PointerSize = sizeof(Pointer) >> 2 );
+ struct Area {
+ Uint32* m_data;
+ Uint32 m_offset;
+ Uint32 m_free;
+ Uint16 m_entries;
+ Uint8 m_idir;
+ Uint8 pad1;
+ Pointer& get_pointer(unsigned i) const {
+ return *(Pointer*)&m_data[i];
+ }
+ Entry& get_entry(unsigned i) const {
+ return *(Entry*)&m_data[get_pointer(i).m_pos];
+ }
+ Uint32 get_pos(const Entry& e) const {
+ return (const Uint32*)&e - m_data;
+ }
+ unsigned get_firstpos() const {
+ return PointerSize * m_entries + m_free;
+ }
+ };
+ const NdbIndexImpl& m_index;
+ Uint32 m_areasize;
+ Uint16 m_seq;
+ Area m_area[2];
+ Uint32* m_cache;
+ NdbError m_error;
+#ifdef VM_TRACE
+ void stat_verify();
+#endif
+ int stat_cmpkey(const Area& a, const Uint32* key1, Uint32 keylen1,
+ const Uint32* key2, Uint32 keylen2);
+ int stat_search(const Area& a, const Uint32* key, Uint32 keylen,
+ Uint32* idx, bool* match);
+ int stat_oldest(const Area& a);
+ int stat_delete(Area& a, Uint32 k);
+ int stat_update(const Uint32* key1, Uint32 keylen1,
+ const Uint32* key2, Uint32 keylen2, const float pct[2]);
+ int stat_select(const Uint32* key1, Uint32 keylen1,
+ const Uint32* key2, Uint32 keylen2, float pct[2]);
+ void set_error(int code);
+};
+
+#endif
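
A minimal usage sketch of the class declared above, not part of the patch. It assumes the caller has already prepared an NdbIndexScanOperation with lock mode LM_CommittedRead and the desired bounds set, as the records_in_range() comment requires; only the NdbIndexStat calls themselves come from the header.

#include <NdbIndexStat.hpp>
#include <NdbIndexScanOperation.hpp>

// Sketch: estimate the number of index entries in the range described by
// the bounds already set on "op".  "index", "op" and "table_rows" are
// assumed to be supplied by the surrounding application code.
static int
estimate_range(NdbDictionary::Index* index, NdbIndexScanOperation* op,
               Uint64 table_rows, Uint64* count)
{
  NdbIndexStat stat(index);
  // optional cache; without it the db is contacted on every call
  if (stat.alloc_cache(32) == -1)
    return -1;                     // details in stat.getNdbError()
  // flags 0: use the cache when it has entries, otherwise contact db;
  // pass NdbIndexStat::RR_UseDb to force a db round trip
  if (stat.records_in_range(index, op, table_rows, count, 0) == -1)
    return -1;                     // details in stat.getNdbError()
  return 0;                        // *count holds the (approximate) estimate
}

In practice the NdbIndexStat object would be kept per index and reused; constructing it per call, as above, throws the cache away each time.
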
diff --git a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
index 538850c4fb1..42688796801 100644
--- a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
+++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
@@ -2622,7 +2622,10 @@ Dblqh::execREAD_PSEUDO_REQ(Signal* signal){
regTcPtr.i = signal->theData[0];
ptrCheckGuard(regTcPtr, ctcConnectrecFileSize, tcConnectionrec);
- if(signal->theData[1] != AttributeHeader::RANGE_NO)
+ if (signal->theData[1] == AttributeHeader::RANGE_NO) {
+ signal->theData[0] = regTcPtr.p->m_scan_curr_range_no;
+ }
+ else if (signal->theData[1] != AttributeHeader::RECORDS_IN_RANGE)
{
jam();
FragrecordPtr regFragptr;
@@ -2634,7 +2637,13 @@ Dblqh::execREAD_PSEUDO_REQ(Signal* signal){
}
else
{
- signal->theData[0] = regTcPtr.p->m_scan_curr_range_no;
+ jam();
+ // scanptr gets reset somewhere within the timeslice
+ ScanRecordPtr tmp;
+ tmp.i = regTcPtr.p->tcScanRec;
+ c_scanRecordPool.getPtr(tmp);
+ signal->theData[0] = tmp.p->scanAccPtr;
+ EXECUTE_DIRECT(DBTUX, GSN_READ_PSEUDO_REQ, signal, 2);
}
}
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp
index 535ff50bcd5..df3f06f4594 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp
@@ -1028,6 +1028,18 @@ Dbtup::read_pseudo(Uint32 attrId, Uint32* outBuffer){
EXECUTE_DIRECT(DBLQH, GSN_READ_PSEUDO_REQ, signal, 2);
outBuffer[0] = signal->theData[0];
return 1;
+
+ case AttributeHeader::RECORDS_IN_RANGE:
+ signal->theData[0] = operPtr.p->userpointer;
+ signal->theData[1] = attrId;
+
+ EXECUTE_DIRECT(DBLQH, GSN_READ_PSEUDO_REQ, signal, 2);
+ outBuffer[0] = signal->theData[0];
+ outBuffer[1] = signal->theData[1];
+ outBuffer[2] = signal->theData[2];
+ outBuffer[3] = signal->theData[3];
+ return 4;
+
default:
return 0;
}
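
Together with the DBLQH hunk above, this wires the new pseudo column through the kernel blocks: DBTUP's read_pseudo() forwards a READ_PSEUDO_REQ to DBLQH, DBLQH passes it on to DBTUX (the handler is added below in DbtuxStat.cpp), and four result words come back: total entries, entries in range, entries before, entries after. On the API side the column is read like any other pseudo column; the fragment below is a sketch of that read, mirroring what NdbIndexStat::records_in_range() does later in this patch.

#include <NdbIndexScanOperation.hpp>
#include <NdbDictionary.hpp>

// Client-side read of the new pseudo column (sketch; "op" is an
// NdbIndexScanOperation already prepared with the desired bounds).
static int
read_records_in_range(NdbIndexScanOperation* op, Uint32 out[4])
{
  // out[0]: total entries, out[1]: in range, out[2]: before, out[3]: after
  if (op->interpret_exit_last_row() == -1 ||
      op->getValue(NdbDictionary::Column::RECORDS_IN_RANGE, (char*)out) == 0)
    return -1;                 // see op->getNdbError()
  return 0;                    // values are filled in once the scan executes
}
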
diff --git a/storage/ndb/src/kernel/blocks/dbtux/Dbtux.hpp b/storage/ndb/src/kernel/blocks/dbtux/Dbtux.hpp
index d4a44b9e641..b34fd5151c2 100644
--- a/storage/ndb/src/kernel/blocks/dbtux/Dbtux.hpp
+++ b/storage/ndb/src/kernel/blocks/dbtux/Dbtux.hpp
@@ -82,10 +82,14 @@
#define jam() jamLine(80000 + __LINE__)
#define jamEntry() jamEntryLine(80000 + __LINE__)
#endif
-#ifdef DBTUX_DEBUG_CPP
+#ifdef DBTUX_STAT_CPP
#define jam() jamLine(90000 + __LINE__)
#define jamEntry() jamEntryLine(90000 + __LINE__)
#endif
+#ifdef DBTUX_DEBUG_CPP
+#define jam() jamLine(100000 + __LINE__)
+#define jamEntry() jamEntryLine(100000 + __LINE__)
+#endif
#ifndef jam
#define jam() jamLine(__LINE__)
#define jamEntry() jamEntryLine(__LINE__)
@@ -116,6 +120,7 @@ private:
STATIC_CONST( MaxPrefSize = MAX_TTREE_PREF_SIZE );
STATIC_CONST( ScanBoundSegmentSize = 7 );
STATIC_CONST( MaxAccLockOps = MAX_PARALLEL_OP_PER_SCAN );
+ STATIC_CONST( MaxTreeDepth = 32 ); // strict
BLOCK_DEFINES(Dbtux);
// forward declarations
@@ -269,6 +274,7 @@ private:
Uint8 m_prefSize; // words in min prefix
Uint8 m_minOccup; // min entries in internal node
Uint8 m_maxOccup; // max entries in node
+ Uint32 m_entryCount; // stat: current entries
TupLoc m_root; // root node
TreeHead();
// methods
@@ -660,6 +666,14 @@ private:
int cmpScanBound(const Frag& frag, unsigned dir, ConstData boundInfo, unsigned boundCount, ConstData entryData, unsigned maxlen = MaxAttrDataSize);
/*
+ * DbtuxStat.cpp
+ */
+ void execREAD_PSEUDO_REQ(Signal* signal);
+ void statRecordsInRange(ScanOpPtr scanPtr, Uint32* out);
+ Uint32 getEntriesBeforeOrAfter(Frag& frag, TreePos pos, unsigned idir);
+ unsigned getPathToNode(NodeHandle node, Uint16* path);
+
+ /*
* DbtuxDebug.cpp
*/
void execDUMP_STATE_ORD(Signal* signal);
@@ -952,6 +966,7 @@ Dbtux::TreeHead::TreeHead() :
m_prefSize(0),
m_minOccup(0),
m_maxOccup(0),
+ m_entryCount(0),
m_root()
{
}
diff --git a/storage/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp b/storage/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp
index 5640fdf2899..7c7d762d1e9 100644
--- a/storage/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp
@@ -66,6 +66,10 @@ Dbtux::Dbtux(const Configuration& conf) :
addRecSignal(GSN_ACCKEYREF, &Dbtux::execACCKEYREF);
addRecSignal(GSN_ACC_ABORTCONF, &Dbtux::execACC_ABORTCONF);
/*
+ * DbtuxStat.cpp
+ */
+ addRecSignal(GSN_READ_PSEUDO_REQ, &Dbtux::execREAD_PSEUDO_REQ);
+ /*
* DbtuxDebug.cpp
*/
addRecSignal(GSN_DUMP_STATE_ORD, &Dbtux::execDUMP_STATE_ORD);
diff --git a/storage/ndb/src/kernel/blocks/dbtux/DbtuxStat.cpp b/storage/ndb/src/kernel/blocks/dbtux/DbtuxStat.cpp
new file mode 100644
index 00000000000..23fb409b63c
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbtux/DbtuxStat.cpp
@@ -0,0 +1,160 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#define DBTUX_STAT_CPP
+#include "Dbtux.hpp"
+
+void
+Dbtux::execREAD_PSEUDO_REQ(Signal* signal)
+{
+ jamEntry();
+ ScanOpPtr scanPtr;
+ scanPtr.i = signal->theData[0];
+ c_scanOpPool.getPtr(scanPtr);
+ if (signal->theData[1] == AttributeHeader::RECORDS_IN_RANGE) {
+ jam();
+ statRecordsInRange(scanPtr, &signal->theData[0]);
+ } else {
+ ndbassert(false);
+ }
+}
+
+/*
+ * Estimate entries in range. Scan is at first entry. Search for last
+ * entry i.e. start of descending scan. Use the 2 positions to estimate
+ * entries before and after the range. Finally get entries in range by
+ * subtracting from total. Errors come from imperfectly balanced tree
+ * and from uncommitted entries which differ only in tuple version.
+ *
+ * Returns 4 Uint32 values: 0) total entries 1) in range 2) before range
+ * 3) after range. 1-3) are estimates and need not add up to 0).
+ */
+void
+Dbtux::statRecordsInRange(ScanOpPtr scanPtr, Uint32* out)
+{
+ ScanOp& scan = *scanPtr.p;
+ Frag& frag = *c_fragPool.getPtr(scan.m_fragPtrI);
+ TreeHead& tree = frag.m_tree;
+ // get first and last position
+ TreePos pos1 = scan.m_scanPos;
+ TreePos pos2;
+ { // as in scanFirst()
+ TreeHead& tree = frag.m_tree;
+ setKeyAttrs(frag);
+ const unsigned idir = 1;
+ const ScanBound& bound = *scan.m_bound[idir];
+ ScanBoundIterator iter;
+ bound.first(iter);
+ for (unsigned j = 0; j < bound.getSize(); j++) {
+ jam();
+ c_dataBuffer[j] = *iter.data;
+ bound.next(iter);
+ }
+ searchToScan(frag, c_dataBuffer, scan.m_boundCnt[idir], true, pos2);
+ // committed read (same timeslice) and range not empty
+ ndbrequire(pos2.m_loc != NullTupLoc);
+ }
+ out[0] = frag.m_tree.m_entryCount;
+ out[2] = getEntriesBeforeOrAfter(frag, pos1, 0);
+ out[3] = getEntriesBeforeOrAfter(frag, pos2, 1);
+ if (pos1.m_loc == pos2.m_loc) {
+ ndbrequire(pos2.m_pos >= pos1.m_pos);
+ out[1] = pos2.m_pos - pos1.m_pos + 1;
+ } else {
+ Uint32 rem = out[2] + out[3];
+ if (out[0] > rem) {
+ out[1] = out[0] - rem;
+ } else {
+ // random guess one node apart
+ out[1] = tree.m_maxOccup;
+ }
+ }
+}
+
+/*
+ * Estimate number of entries strictly before or after given position.
+ * Each branch to right direction wins parent node and the subtree on
+ * the other side. Subtree entries is estimated from depth and total
+ * entries by assuming that the tree is perfectly balanced.
+ */
+Uint32
+Dbtux::getEntriesBeforeOrAfter(Frag& frag, TreePos pos, unsigned idir)
+{
+ NodeHandle node(frag);
+ selectNode(node, pos.m_loc);
+ Uint16 path[MaxTreeDepth + 1];
+ unsigned depth = getPathToNode(node, path);
+ ndbrequire(depth != 0 && depth <= MaxTreeDepth);
+ TreeHead& tree = frag.m_tree;
+ Uint32 cnt = 0;
+ Uint32 tot = tree.m_entryCount;
+ unsigned i = 0;
+ // contribution from levels above
+ while (i + 1 < depth) {
+ unsigned occup2 = (path[i] >> 8);
+ unsigned side = (path[i + 1] & 0xFF);
+ // subtree of this node has about half the entries
+ tot = tot >= occup2 ? (tot - occup2) / 2 : 0;
+ // branch to other side wins parent and a subtree
+ if (side != idir) {
+ cnt += occup2;
+ cnt += tot;
+ }
+ i++;
+ }
+ // contribution from this node
+ unsigned occup = (path[i] >> 8);
+ ndbrequire(pos.m_pos < occup);
+ if (idir == 0) {
+ if (pos.m_pos != 0)
+ cnt += pos.m_pos - 1;
+ } else {
+ cnt += occup - (pos.m_pos + 1);
+ }
+ // contribution from levels below
+ tot = tot >= occup ? (tot - occup) / 2 : 0;
+ cnt += tot;
+ return cnt;
+}
+
+/*
+ * Construct path to given node. Returns depth. Root node has path
+ * 2 and depth 1. In general the path is 2{0,1}* where 0,1 is the side
+ * (left,right branch). In addition the occupancy of each node is
+ * returned in the upper 8 bits.
+ */
+unsigned
+Dbtux::getPathToNode(NodeHandle node, Uint16* path)
+{
+ TupLoc loc = node.m_loc;
+ unsigned i = MaxTreeDepth;
+ while (loc != NullTupLoc) {
+ jam();
+ selectNode(node, loc);
+ path[i] = node.getSide() | (node.getOccup() << 8);
+ loc = node.getLink(2);
+ ndbrequire(i != 0);
+ i--;
+ }
+ unsigned depth = MaxTreeDepth - i;
+ unsigned j = 0;
+ while (j < depth) {
+ path[j] = path[i + 1 + j];
+ j++;
+ }
+ path[j] = 0xFFFF; // catch bug
+ return depth;
+}
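
The final arithmetic in statRecordsInRange() is simple once the two tree-walk estimates are in hand (the special case where both scan positions land in the same node just counts positions directly). A small self-contained illustration with made-up numbers follows; the kernel of course fills these from the tree, and maxOccup here is an assumed node occupancy.

#include <cassert>

// Hypothetical numbers illustrating the out[] values computed by
// statRecordsInRange() above; not part of the patch.
int main()
{
  const unsigned maxOccup = 15;   // assumed max entries per node
  unsigned out[4];
  out[0] = 10000;                 // total entries in the fragment
  out[2] = 3100;                  // estimated entries before the range
  out[3] = 5400;                  // estimated entries after the range
  unsigned rem = out[2] + out[3];
  // in-range estimate: whatever is left over, or one node's worth if the
  // balanced-tree approximation already over-counts the outside parts
  out[1] = (out[0] > rem) ? out[0] - rem : maxOccup;
  assert(out[1] == 1500);
  return 0;
}
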
diff --git a/storage/ndb/src/kernel/blocks/dbtux/DbtuxTree.cpp b/storage/ndb/src/kernel/blocks/dbtux/DbtuxTree.cpp
index 5107a8d8e31..cc2725c4d89 100644
--- a/storage/ndb/src/kernel/blocks/dbtux/DbtuxTree.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtux/DbtuxTree.cpp
@@ -26,25 +26,29 @@ Dbtux::treeAdd(Frag& frag, TreePos treePos, TreeEnt ent)
{
TreeHead& tree = frag.m_tree;
NodeHandle node(frag);
- if (treePos.m_loc != NullTupLoc) {
- // non-empty tree
- jam();
- selectNode(node, treePos.m_loc);
- unsigned pos = treePos.m_pos;
- if (node.getOccup() < tree.m_maxOccup) {
- // node has room
+ do {
+ if (treePos.m_loc != NullTupLoc) {
+ // non-empty tree
jam();
- nodePushUp(node, pos, ent, RNIL);
- return;
+ selectNode(node, treePos.m_loc);
+ unsigned pos = treePos.m_pos;
+ if (node.getOccup() < tree.m_maxOccup) {
+ // node has room
+ jam();
+ nodePushUp(node, pos, ent, RNIL);
+ break;
+ }
+ treeAddFull(frag, node, pos, ent);
+ break;
}
- treeAddFull(frag, node, pos, ent);
- return;
- }
- jam();
- insertNode(node);
- nodePushUp(node, 0, ent, RNIL);
- node.setSide(2);
- tree.m_root = node.m_loc;
+ jam();
+ insertNode(node);
+ nodePushUp(node, 0, ent, RNIL);
+ node.setSide(2);
+ tree.m_root = node.m_loc;
+ break;
+ } while (0);
+ tree.m_entryCount++;
}
/*
@@ -178,31 +182,36 @@ Dbtux::treeRemove(Frag& frag, TreePos treePos)
NodeHandle node(frag);
selectNode(node, treePos.m_loc);
TreeEnt ent;
- if (node.getOccup() > tree.m_minOccup) {
- // no underflow in any node type
- jam();
+ do {
+ if (node.getOccup() > tree.m_minOccup) {
+ // no underflow in any node type
+ jam();
+ nodePopDown(node, pos, ent, 0);
+ break;
+ }
+ if (node.getChilds() == 2) {
+ // underflow in interior node
+ jam();
+ treeRemoveInner(frag, node, pos);
+ break;
+ }
+ // remove entry in semi/leaf
nodePopDown(node, pos, ent, 0);
- return;
- }
- if (node.getChilds() == 2) {
- // underflow in interior node
- jam();
- treeRemoveInner(frag, node, pos);
- return;
- }
- // remove entry in semi/leaf
- nodePopDown(node, pos, ent, 0);
- if (node.getLink(0) != NullTupLoc) {
- jam();
- treeRemoveSemi(frag, node, 0);
- return;
- }
- if (node.getLink(1) != NullTupLoc) {
- jam();
- treeRemoveSemi(frag, node, 1);
- return;
- }
- treeRemoveLeaf(frag, node);
+ if (node.getLink(0) != NullTupLoc) {
+ jam();
+ treeRemoveSemi(frag, node, 0);
+ break;
+ }
+ if (node.getLink(1) != NullTupLoc) {
+ jam();
+ treeRemoveSemi(frag, node, 1);
+ break;
+ }
+ treeRemoveLeaf(frag, node);
+ break;
+ } while (0);
+ ndbrequire(tree.m_entryCount != 0);
+ tree.m_entryCount--;
}
/*
diff --git a/storage/ndb/src/kernel/blocks/dbtux/Makefile.am b/storage/ndb/src/kernel/blocks/dbtux/Makefile.am
index 12d450e8632..41eefaf0c3e 100644
--- a/storage/ndb/src/kernel/blocks/dbtux/Makefile.am
+++ b/storage/ndb/src/kernel/blocks/dbtux/Makefile.am
@@ -9,6 +9,7 @@ libdbtux_a_SOURCES = \
DbtuxScan.cpp \
DbtuxSearch.cpp \
DbtuxCmp.cpp \
+ DbtuxStat.cpp \
DbtuxDebug.cpp
INCLUDES_LOC = -I$(top_srcdir)/storage/ndb/src/kernel/blocks/dbtup
diff --git a/storage/ndb/src/ndbapi/Makefile.am b/storage/ndb/src/ndbapi/Makefile.am
index a4a0b8098a2..12590b6b8a5 100644
--- a/storage/ndb/src/ndbapi/Makefile.am
+++ b/storage/ndb/src/ndbapi/Makefile.am
@@ -35,6 +35,7 @@ libndbapi_la_SOURCES = \
DictCache.cpp \
ndb_cluster_connection.cpp \
NdbBlob.cpp \
+ NdbIndexStat.cpp \
SignalSender.cpp
INCLUDES_LOC = -I$(top_srcdir)/storage/ndb/src/mgmapi
diff --git a/storage/ndb/src/ndbapi/NdbDictionary.cpp b/storage/ndb/src/ndbapi/NdbDictionary.cpp
index 0d464c6d412..2a6cf07b2ca 100644
--- a/storage/ndb/src/ndbapi/NdbDictionary.cpp
+++ b/storage/ndb/src/ndbapi/NdbDictionary.cpp
@@ -1070,3 +1070,4 @@ const NdbDictionary::Column * NdbDictionary::Column::ROW_COUNT = 0;
const NdbDictionary::Column * NdbDictionary::Column::COMMIT_COUNT = 0;
const NdbDictionary::Column * NdbDictionary::Column::ROW_SIZE = 0;
const NdbDictionary::Column * NdbDictionary::Column::RANGE_NO = 0;
+const NdbDictionary::Column * NdbDictionary::Column::RECORDS_IN_RANGE = 0;
diff --git a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp
index 529ba09207b..f7b23200223 100644
--- a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp
+++ b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp
@@ -266,6 +266,11 @@ NdbColumnImpl::create_pseudo(const char * name){
col->m_impl.m_attrId = AttributeHeader::RANGE_NO;
col->m_impl.m_attrSize = 4;
col->m_impl.m_arraySize = 1;
+ } else if(!strcmp(name, "NDB$RECORDS_IN_RANGE")){
+ col->setType(NdbDictionary::Column::Unsigned);
+ col->m_impl.m_attrId = AttributeHeader::RECORDS_IN_RANGE;
+ col->m_impl.m_attrSize = 4;
+ col->m_impl.m_arraySize = 4;
} else {
abort();
}
@@ -739,12 +744,14 @@ NdbDictionaryImpl::~NdbDictionaryImpl()
delete NdbDictionary::Column::COMMIT_COUNT;
delete NdbDictionary::Column::ROW_SIZE;
delete NdbDictionary::Column::RANGE_NO;
+ delete NdbDictionary::Column::RECORDS_IN_RANGE;
NdbDictionary::Column::FRAGMENT= 0;
NdbDictionary::Column::FRAGMENT_MEMORY= 0;
NdbDictionary::Column::ROW_COUNT= 0;
NdbDictionary::Column::COMMIT_COUNT= 0;
NdbDictionary::Column::ROW_SIZE= 0;
NdbDictionary::Column::RANGE_NO= 0;
+ NdbDictionary::Column::RECORDS_IN_RANGE= 0;
}
m_globalHash->unlock();
} else {
@@ -817,6 +824,8 @@ NdbDictionaryImpl::setTransporter(class Ndb* ndb,
NdbColumnImpl::create_pseudo("NDB$ROW_SIZE");
NdbDictionary::Column::RANGE_NO=
NdbColumnImpl::create_pseudo("NDB$RANGE_NO");
+ NdbDictionary::Column::RECORDS_IN_RANGE=
+ NdbColumnImpl::create_pseudo("NDB$RECORDS_IN_RANGE");
}
m_globalHash->unlock();
return true;
@@ -2175,6 +2184,7 @@ NdbDictInterface::create_index_obj_from_table(NdbIndexImpl** dst,
}
* dst = idx;
+
return 0;
}
@@ -3209,4 +3219,3 @@ template class Vector<Uint32>;
template class Vector<Vector<Uint32> >;
template class Vector<NdbTableImpl*>;
template class Vector<NdbColumnImpl*>;
-
diff --git a/storage/ndb/src/ndbapi/NdbIndexStat.cpp b/storage/ndb/src/ndbapi/NdbIndexStat.cpp
new file mode 100644
index 00000000000..3f46d7909cd
--- /dev/null
+++ b/storage/ndb/src/ndbapi/NdbIndexStat.cpp
@@ -0,0 +1,490 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include <ndb_global.h>
+#include <AttributeHeader.hpp>
+#include <NdbSqlUtil.hpp>
+#include <NdbIndexStat.hpp>
+#include <NdbTransaction.hpp>
+#include <NdbIndexScanOperation.hpp>
+#include "NdbDictionaryImpl.hpp"
+#include <my_sys.h>
+
+NdbIndexStat::NdbIndexStat(const NdbDictionary::Index* index) :
+ m_index(index->m_impl),
+ m_cache(NULL)
+{
+}
+
+NdbIndexStat::~NdbIndexStat()
+{
+ delete [] m_cache;
+ m_cache = NULL;
+}
+
+int
+NdbIndexStat::alloc_cache(Uint32 entries)
+{
+ delete [] m_cache;
+ m_cache = NULL;
+ if (entries == 0) {
+ return 0;
+ }
+ Uint32 i;
+ Uint32 keysize = 0;
+ for (i = 0; i < m_index.m_columns.size(); i++) {
+ NdbColumnImpl* c = m_index.m_columns[i];
+ keysize += 2; // counting extra headers
+ keysize += (c->m_attrSize * c->m_arraySize + 3 ) / 4;
+ }
+ Uint32 areasize = entries * (PointerSize + EntrySize + keysize);
+ if (areasize > (1 << 16))
+ areasize = (1 << 16);
+ Uint32 cachesize = 2 * areasize;
+ m_cache = new Uint32 [cachesize];
+ if (m_cache == NULL) {
+ set_error(4000);
+ return -1;
+ }
+ m_areasize = areasize;
+ m_seq = 0;
+ Uint32 idir;
+ for (idir = 0; idir <= 1; idir++) {
+ Area& a = m_area[idir];
+ a.m_data = &m_cache[idir * areasize];
+ a.m_offset = a.m_data - &m_cache[0];
+ a.m_free = areasize;
+ a.m_entries = 0;
+ a.m_idir = idir;
+ a.pad1 = 0;
+ }
+#ifdef VM_TRACE
+ memset(&m_cache[0], 0x3f, cachesize << 2);
+#endif
+ return 0;
+}
+
+#ifndef VM_TRACE
+#define stat_verify()
+#else
+void
+NdbIndexStat::stat_verify()
+{
+ Uint32 idir;
+ for (idir = 0; idir <= 1; idir++) {
+ Uint32 i;
+ const Area& a = m_area[idir];
+ assert(a.m_offset == idir * m_areasize);
+ assert(a.m_data == &m_cache[a.m_offset]);
+ Uint32 pointerwords = PointerSize * a.m_entries;
+ Uint32 entrywords = 0;
+ for (i = 0; i < a.m_entries; i++) {
+ const Pointer& p = a.get_pointer(i);
+ const Entry& e = a.get_entry(i);
+ assert(a.get_pos(e) == p.m_pos);
+ entrywords += EntrySize + e.m_keylen;
+ }
+ assert(a.m_free <= m_areasize);
+ assert(pointerwords + a.m_free + entrywords == m_areasize);
+ Uint32 off = pointerwords + a.m_free;
+ for (i = 0; i < a.m_entries; i++) {
+ assert(off < m_areasize);
+ const Entry& e = *(const Entry*)&a.m_data[off];
+ off += EntrySize + e.m_keylen;
+ }
+ assert(off == m_areasize);
+ for (i = 0; i < a.m_entries; i++) {
+ const Entry& e = a.get_entry(i);
+ const Uint32* entrykey = (const Uint32*)&e + EntrySize;
+ Uint32 n = 0;
+ while (n + 2 <= e.m_keylen) {
+ Uint32 t = entrykey[n++];
+ assert(t == 2 * idir || t == 2 * idir + 1 || t == 4);
+ AttributeHeader ah = *(const AttributeHeader*)&entrykey[n++];
+ n += ah.getDataSize();
+ }
+ assert(n == e.m_keylen);
+ }
+ for (i = 0; i + 1 < a.m_entries; i++) {
+ const Entry& e1 = a.get_entry(i);
+ const Entry& e2 = a.get_entry(i + 1);
+ const Uint32* entrykey1 = (const Uint32*)&e1 + EntrySize;
+ const Uint32* entrykey2 = (const Uint32*)&e2 + EntrySize;
+ int ret = stat_cmpkey(a, entrykey1, e1.m_keylen, entrykey2, e2.m_keylen);
+ assert(ret == -1);
+ }
+ }
+}
+#endif
+
+// compare keys
+int
+NdbIndexStat::stat_cmpkey(const Area& a, const Uint32* key1, Uint32 keylen1, const Uint32* key2, Uint32 keylen2)
+{
+ const Uint32 idir = a.m_idir;
+ const int jdir = 1 - 2 * int(idir);
+ Uint32 i1 = 0, i2 = 0;
+ Uint32 t1 = 4, t2 = 4; //BoundEQ
+ int ret = 0;
+ Uint32 k = 0;
+ while (k < m_index.m_columns.size()) {
+ NdbColumnImpl* c = m_index.m_columns[k];
+ Uint32 n = c->m_attrSize * c->m_arraySize;
+ // absence of keypart is treated specially
+ bool havekp1 = (i1 + 2 <= keylen1);
+ bool havekp2 = (i2 + 2 <= keylen2);
+ AttributeHeader ah1;
+ AttributeHeader ah2;
+ if (havekp1) {
+ t1 = key1[i1++];
+ assert(t1 == 2 * idir || t1 == 2 * idir + 1 || t1 == 4);
+ ah1 = *(const AttributeHeader*)&key1[i1++];
+ }
+ if (havekp2) {
+ t2 = key2[i2++];
+ assert(t2 == 2 * idir || t2 == 2 * idir + 1 || t2 == 4);
+ ah2 = *(const AttributeHeader*)&key2[i2++];
+ }
+ if (havekp1) {
+ if (havekp2) {
+ if (! ah1.isNULL()) {
+ if (! ah2.isNULL()) {
+ const NdbSqlUtil::Type& sqlType = NdbSqlUtil::getType(c->m_type);
+ ret = (*sqlType.m_cmp)(c->m_cs, &key1[i1], n, &key2[i2], n, true);
+ if (ret != 0)
+ break;
+ } else {
+ ret = +1;
+ break;
+ }
+ } else if (! ah2.isNULL()) {
+ ret = -1;
+ break;
+ }
+ } else {
+ ret = +jdir;
+ break;
+ }
+ } else {
+ if (havekp2) {
+ ret = -jdir;
+ break;
+ } else {
+ // no more keyparts on either side
+ break;
+ }
+ }
+ i1 += ah1.getDataSize();
+ i2 += ah2.getDataSize();
+ k++;
+ }
+ if (ret == 0) {
+ // strict bound is greater as start key and less as end key
+ int s1 = t1 & 1;
+ int s2 = t2 & 1;
+ ret = (s1 - s2) * jdir;
+ }
+ return ret;
+}
+
+// find first key >= given key
+int
+NdbIndexStat::stat_search(const Area& a, const Uint32* key, Uint32 keylen, Uint32* idx, bool* match)
+{
+ // points at minus/plus infinity
+ int lo = -1;
+ int hi = a.m_entries;
+ // loop invariant: key(lo) < key < key(hi)
+ while (hi - lo > 1) {
+ // observe lo < j < hi
+ int j = (hi + lo) / 2;
+ Entry& e = a.get_entry(j);
+ const Uint32* key2 = (Uint32*)&e + EntrySize;
+ Uint32 keylen2 = e.m_keylen;
+ int ret = stat_cmpkey(a, key, keylen, key2, keylen2);
+ // observe the loop invariant if ret != 0
+ if (ret < 0)
+ hi = j;
+ else if (ret > 0)
+ lo = j;
+ else {
+ *idx = j;
+ *match = true;
+ return 0;
+ }
+ }
+ // hi - lo == 1 and key(lo) < key < key(hi)
+ *idx = hi;
+ *match = false;
+ return 0;
+}
+
+// find oldest entry
+int
+NdbIndexStat::stat_oldest(const Area& a)
+{
+ Uint32 i, k, m;
+ bool found = false;
+ for (i = 0; i < a.m_entries; i++) {
+ Pointer& p = a.get_pointer(i);
+ Entry& e = a.get_entry(i);
+ Uint32 m2 = m_seq >= p.m_seq ? m_seq - p.m_seq : p.m_seq - m_seq;
+ if (! found || m < m2) {
+ m = m2;
+ k = i;
+ found = true;
+ }
+ }
+ assert(found);
+ return k;
+}
+
+// delete entry
+int
+NdbIndexStat::stat_delete(Area& a, Uint32 k)
+{
+ Uint32 i;
+ NdbIndexStat::Entry& e = a.get_entry(k);
+ Uint32 entrylen = EntrySize + e.m_keylen;
+ Uint32 pos = a.get_pos(e);
+ // adjust pointers to entries after
+ for (i = 0; i < a.m_entries; i++) {
+ Pointer& p = a.get_pointer(i);
+ if (p.m_pos < pos) {
+ p.m_pos += entrylen;
+ }
+ }
+ // compact entry area
+ unsigned firstpos = a.get_firstpos();
+ for (i = pos; i > firstpos; i--) {
+ a.m_data[i + entrylen - 1] = a.m_data[i - 1];
+ }
+ // compact pointer area
+ for (i = k; i + 1 < a.m_entries; i++) {
+ NdbIndexStat::Pointer& p = a.get_pointer(i);
+ NdbIndexStat::Pointer& q = a.get_pointer(i + 1);
+ p = q;
+ }
+ a.m_free += PointerSize + entrylen;
+ a.m_entries--;
+ stat_verify();
+ return 0;
+}
+
+// update or insert stat values
+int
+NdbIndexStat::stat_update(const Uint32* key1, Uint32 keylen1, const Uint32* key2, Uint32 keylen2, const float pct[2])
+{
+ const Uint32* const key[2] = { key1, key2 };
+ const Uint32 keylen[2] = { keylen1, keylen2 };
+ Uint32 idir;
+ for (idir = 0; idir <= 1; idir++) {
+ Area& a = m_area[idir];
+ Uint32 k;
+ bool match;
+ stat_search(a, key[idir], keylen[idir], &k, &match);
+ Uint16 seq = m_seq++;
+ if (match) {
+ // update old entry
+ NdbIndexStat::Pointer& p = a.get_pointer(k);
+ NdbIndexStat::Entry& e = a.get_entry(k);
+ e.m_pct = pct[idir];
+ p.m_seq = seq;
+ } else {
+ Uint32 entrylen = NdbIndexStat::EntrySize + keylen[idir];
+ Uint32 need = NdbIndexStat::PointerSize + entrylen;
+ while (need > a.m_free) {
+ Uint32 j = stat_oldest(a);
+ if (j < k)
+ k--;
+ stat_delete(a, j);
+ }
+ // insert pointer
+ Uint32 i;
+ for (i = a.m_entries; i > k; i--) {
+ NdbIndexStat::Pointer& p1 = a.get_pointer(i);
+ NdbIndexStat::Pointer& p2 = a.get_pointer(i - 1);
+ p1 = p2;
+ }
+ NdbIndexStat::Pointer& p = a.get_pointer(k);
+ // insert entry
+ Uint32 firstpos = a.get_firstpos();
+ p.m_pos = firstpos - entrylen;
+ NdbIndexStat::Entry& e = a.get_entry(k);
+ e.m_pct = pct[idir];
+ e.m_keylen = keylen[idir];
+ Uint32* entrykey = (Uint32*)&e + EntrySize;
+ for (i = 0; i < keylen[idir]; i++) {
+ entrykey[i] = key[idir][i];
+ }
+ p.m_seq = seq;
+ // total
+ a.m_free -= PointerSize + entrylen;
+ a.m_entries++;
+ }
+ }
+ stat_verify();
+ return 0;
+}
+
+int
+NdbIndexStat::stat_select(const Uint32* key1, Uint32 keylen1, const Uint32* key2, Uint32 keylen2, float pct[2])
+{
+ const Uint32* const key[2] = { key1, key2 };
+ const Uint32 keylen[2] = { keylen1, keylen2 };
+ Uint32 idir;
+ for (idir = 0; idir <= 1; idir++) {
+ Area& a = m_area[idir];
+ Uint32 k;
+ bool match;
+ stat_search(a, key[idir], keylen[idir], &k, &match);
+ if (match) {
+ NdbIndexStat::Entry& e = a.get_entry(k);
+ pct[idir] = e.m_pct;
+ } else if (k == 0) {
+ NdbIndexStat::Entry& e = a.get_entry(k);
+ if (idir == 0)
+ pct[idir] = e.m_pct / 2;
+ else
+ pct[idir] = e.m_pct + (1 - e.m_pct) / 2;
+ } else if (k == a.m_entries) {
+ NdbIndexStat::Entry& e = a.get_entry(k - 1);
+ if (idir == 0)
+ pct[idir] = e.m_pct + (1 - e.m_pct) / 2;
+ else
+ pct[idir] = e.m_pct / 2;
+ } else {
+ NdbIndexStat::Entry& e1 = a.get_entry(k - 1);
+ NdbIndexStat::Entry& e2 = a.get_entry(k);
+ pct[idir] = (e1.m_pct + e2.m_pct) / 2;
+ }
+ }
+ return 0;
+}
+
+int
+NdbIndexStat::records_in_range(NdbDictionary::Index* index, NdbIndexScanOperation* op, Uint64 table_rows, Uint64* count, int flags)
+{
+ DBUG_ENTER("NdbIndexStat::records_in_range");
+ Uint64 rows;
+ Uint32 key1[1000], keylen1;
+ Uint32 key2[1000], keylen2;
+
+ if (m_cache == NULL)
+ flags |= RR_UseDb | RR_NoUpdate;
+ else if (m_area[0].m_entries == 0 || m_area[1].m_entries == 0)
+ flags |= RR_UseDb;
+
+ if ((flags & (RR_UseDb | RR_NoUpdate)) != RR_UseDb | RR_NoUpdate) {
+ // get start and end key - assume bound is ordered, wellformed
+ Uint32 bound[1000];
+ Uint32 boundlen = op->getKeyFromSCANTABREQ(bound, 1000);
+
+ keylen1 = keylen2 = 0;
+ Uint32 n = 0;
+ while (n < boundlen) {
+ Uint32 t = bound[n];
+ AttributeHeader ah(bound[n + 1]);
+ Uint32 sz = 2 + ah.getDataSize();
+ t &= 0xFFFF; // may contain length
+ assert(t <= 4);
+ bound[n] = t;
+ if (t == 0 || t == 1 || t == 4) {
+ memcpy(&key1[keylen1], &bound[n], sz << 2);
+ keylen1 += sz;
+ }
+ if (t == 2 || t == 3 || t == 4) {
+ memcpy(&key2[keylen2], &bound[n], sz << 2);
+ keylen2 += sz;
+ }
+ n += sz;
+ }
+ }
+
+ if (flags & RR_UseDb) {
+ Uint32 out[4] = { 0, 0, 0, 0 }; // rows, in, before, after
+ float tot[4] = { 0, 0, 0, 0 }; // totals of above
+ int cnt, ret;
+ bool forceSend = true;
+ NdbTransaction* trans = op->m_transConnection;
+ if (op->interpret_exit_last_row() == -1 ||
+ op->getValue(NdbDictionary::Column::RECORDS_IN_RANGE, (char*)out) == 0) {
+ DBUG_PRINT("error", ("op:%d", op->getNdbError().code));
+ DBUG_RETURN(-1);
+ }
+ if (trans->execute(NdbTransaction::NoCommit,
+ NdbTransaction::AbortOnError, forceSend) == -1) {
+ DBUG_PRINT("error", ("trans:%d op:%d", trans->getNdbError().code,
+ op->getNdbError().code));
+ DBUG_RETURN(-1);
+ }
+ cnt = 0;
+ while ((ret = op->nextResult(true, forceSend)) == 0) {
+ DBUG_PRINT("info", ("frag rows=%u in=%u before=%u after=%u [error=%d]",
+ out[0], out[1], out[2], out[3],
+ (int)(out[1] + out[2] + out[3]) - (int)out[0]));
+ unsigned i;
+ for (i = 0; i < 4; i++)
+ tot[i] += (float)out[i];
+ cnt++;
+ }
+ if (ret == -1) {
+ DBUG_PRINT("error", ("trans:%d op:%d", trans->getNdbError().code,
+ op->getNdbError().code));
+ DBUG_RETURN(-1);
+ }
+ op->close(forceSend);
+ rows = (Uint64)tot[1];
+ if (cnt != 0 && ! (flags & RR_NoUpdate)) {
+ float pct[2];
+ pct[0] = 100 * tot[2] / tot[0];
+ pct[1] = 100 * tot[3] / tot[0];
+ DBUG_PRINT("info", ("update stat pct"
+ " before=%.2f after=%.2f",
+ pct[0], pct[1]));
+ stat_update(key1, keylen1, key2, keylen2, pct);
+ }
+ } else {
+ float pct[2];
+ stat_select(key1, keylen1, key2, keylen2, pct);
+ float diff = 100.0 - (pct[0] + pct[1]);
+ float trows = (float)table_rows;
+ DBUG_PRINT("info", ("select stat pct"
+ " before=%.2f after=%.2f in=%.2f table_rows=%.2f",
+ pct[0], pct[1], diff, trows));
+ rows = 0;
+ if (diff >= 0)
+ rows = (Uint64)(diff * trows / 100);
+ if (rows == 0)
+ rows = 1;
+ }
+
+ *count = rows;
+ DBUG_PRINT("value", ("rows=%llu flags=%o", rows, flags));
+ DBUG_RETURN(0);
+}
+
+void
+NdbIndexStat::set_error(int code)
+{
+ m_error.code = code;
+}
+
+const NdbError&
+NdbIndexStat::getNdbError() const
+{
+ return m_error;
+}
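
When the cache is used instead of the db, records_in_range() turns the two "outside the range" percentages returned by stat_select() into a row estimate against the caller-supplied table_rows. A small illustration with assumed values, not part of the patch:

#include <ndb_global.h>
#include <cassert>

// Assumed percentages illustrating the cache path of records_in_range().
int main()
{
  float pct[2] = { 35.0f, 55.0f };   // % of rows before start / after end key
  Uint64 table_rows = 20000;         // caller-supplied table row estimate
  float diff = 100.0f - (pct[0] + pct[1]);   // % of rows inside the range
  Uint64 rows = 0;
  if (diff >= 0)
    rows = (Uint64)(diff * (float)table_rows / 100);
  if (rows == 0)
    rows = 1;                        // a zero estimate is reported as 1
  assert(rows == 2000);
  return 0;
}
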
diff --git a/storage/ndb/src/ndbapi/NdbScanOperation.cpp b/storage/ndb/src/ndbapi/NdbScanOperation.cpp
index 288b8dc8bd8..cd2a46e7f7c 100644
--- a/storage/ndb/src/ndbapi/NdbScanOperation.cpp
+++ b/storage/ndb/src/ndbapi/NdbScanOperation.cpp
@@ -1198,6 +1198,31 @@ error:
return -1;
}
+Uint32
+NdbIndexScanOperation::getKeyFromSCANTABREQ(Uint32* data, Uint32 size)
+{
+ DBUG_ENTER("NdbIndexScanOperation::getKeyFromSCANTABREQ");
+ assert(size >= theTotalNrOfKeyWordInSignal);
+ size = theTotalNrOfKeyWordInSignal;
+ NdbApiSignal* tSignal = theSCAN_TABREQ->next();
+ Uint32 pos = 0;
+ while (pos < size) {
+ assert(tSignal != NULL);
+ Uint32* tData = tSignal->getDataPtrSend();
+ Uint32 rem = size - pos;
+ if (rem > KeyInfo::DataLength)
+ rem = KeyInfo::DataLength;
+ Uint32 i = 0;
+ while (i < rem) {
+ data[pos + i] = tData[KeyInfo::HeaderLength + i];
+ i++;
+ }
+ pos += rem;
+ }
+ DBUG_DUMP("key", (char*)data, size << 2);
+ DBUG_RETURN(size);
+}
+
int
NdbIndexScanOperation::readTuples(LockMode lm,
Uint32 scan_flags,