summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorpekka@mysql.com <>2005-09-15 02:33:28 +0200
committerpekka@mysql.com <>2005-09-15 02:33:28 +0200
commit81d8cc1e8411d482b48cafda64cddc5e446632cc (patch)
treef35bfbf589887642a59e75b7c082269a709cd86e
parent44a6b536d2fabad2bd57dd6336afd6e6578aeacb (diff)
downloadmariadb-git-81d8cc1e8411d482b48cafda64cddc5e446632cc.tar.gz
ndb - wl#2624 re-commit due to bk problem
-rw-r--r--mysql-test/r/ndb_basic.result2
-rw-r--r--mysql-test/r/ndb_blob.result6
-rw-r--r--mysql-test/r/ndb_charset.result4
-rw-r--r--mysql-test/r/ndb_condition_pushdown.result26
-rw-r--r--mysql-test/r/ndb_index_ordered.result138
-rw-r--r--mysql-test/t/ndb_index_ordered.test74
-rw-r--r--sql/ha_ndbcluster.cc127
-rw-r--r--sql/ha_ndbcluster.h10
-rw-r--r--sql/mysqld.cc25
-rw-r--r--sql/set_var.cc17
-rw-r--r--sql/sql_class.h3
-rw-r--r--storage/ndb/include/kernel/AttributeHeader.hpp2
-rw-r--r--storage/ndb/include/ndbapi/NdbDictionary.hpp2
-rw-r--r--storage/ndb/include/ndbapi/NdbIndexScanOperation.hpp3
-rw-r--r--storage/ndb/include/ndbapi/NdbIndexStat.hpp141
-rw-r--r--storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp13
-rw-r--r--storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp12
-rw-r--r--storage/ndb/src/kernel/blocks/dbtux/Dbtux.hpp17
-rw-r--r--storage/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp4
-rw-r--r--storage/ndb/src/kernel/blocks/dbtux/DbtuxStat.cpp160
-rw-r--r--storage/ndb/src/kernel/blocks/dbtux/DbtuxTree.cpp91
-rw-r--r--storage/ndb/src/kernel/blocks/dbtux/Makefile.am1
-rw-r--r--storage/ndb/src/ndbapi/Makefile.am1
-rw-r--r--storage/ndb/src/ndbapi/NdbDictionary.cpp1
-rw-r--r--storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp11
-rw-r--r--storage/ndb/src/ndbapi/NdbIndexStat.cpp490
-rw-r--r--storage/ndb/src/ndbapi/NdbScanOperation.cpp25
27 files changed, 1336 insertions, 70 deletions
diff --git a/mysql-test/r/ndb_basic.result b/mysql-test/r/ndb_basic.result
index a374f845933..0a46a3db05c 100644
--- a/mysql-test/r/ndb_basic.result
+++ b/mysql-test/r/ndb_basic.result
@@ -568,7 +568,7 @@ t1
insert into t1 values (1,1),(2,1),(3,1),(4,1),(5,2),(6,1),(7,1);
explain select * from t1 where a12345678901234567890123456789a1234567890=2;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref a12345678901234567890123456789a1234567890 a12345678901234567890123456789a1234567890 5 const 10 Using where
+1 SIMPLE t1 ref a12345678901234567890123456789a1234567890 a12345678901234567890123456789a1234567890 5 const 1 Using where
select * from t1 where a12345678901234567890123456789a1234567890=2;
a1234567890123456789012345678901234567890 a12345678901234567890123456789a1234567890
5 2
diff --git a/mysql-test/r/ndb_blob.result b/mysql-test/r/ndb_blob.result
index f806cf08ea9..f28cb865962 100644
--- a/mysql-test/r/ndb_blob.result
+++ b/mysql-test/r/ndb_blob.result
@@ -134,7 +134,7 @@ insert into t1 values(2,@b2,222,@d2);
commit;
explain select * from t1 where c = 111;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref c c 4 const 10
+1 SIMPLE t1 ref c c 4 const 1
select a,length(b),substr(b,1+2*900,2),length(d),substr(d,1+3*900,3)
from t1 where c=111;
a length(b) substr(b,1+2*900,2) length(d) substr(d,1+3*900,3)
@@ -242,7 +242,7 @@ insert into t1 values(9,'b9',999,'dd9');
commit;
explain select * from t1 where c >= 100 order by a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range c c 4 NULL 10 Using where; Using filesort
+1 SIMPLE t1 range c c 4 NULL 9 Using where; Using filesort
select * from t1 where c >= 100 order by a;
a b c d
1 b1 111 dd1
@@ -278,7 +278,7 @@ insert into t1 values(2,@b2,222,@d2);
commit;
explain select * from t1 where c >= 100 order by a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range c c 4 NULL 10 Using where; Using filesort
+1 SIMPLE t1 range c c 4 NULL 2 Using where; Using filesort
select a,length(b),substr(b,1+2*900,2),length(d),substr(d,1+3*900,3)
from t1 where c >= 100 order by a;
a length(b) substr(b,1+2*900,2) length(d) substr(d,1+3*900,3)
diff --git a/mysql-test/r/ndb_charset.result b/mysql-test/r/ndb_charset.result
index 500b0497890..32843fd7731 100644
--- a/mysql-test/r/ndb_charset.result
+++ b/mysql-test/r/ndb_charset.result
@@ -188,7 +188,7 @@ p a
6 AAA
explain select * from t1 where a = 'zZz' order by p;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref a a 3 const 10 Using where; Using filesort
+1 SIMPLE t1 const a NULL NULL NULL 1
select * from t1 where a = 'aAa' order by p;
p a
1 aAa
@@ -225,7 +225,7 @@ p a
6 AAA
explain select * from t1 where a = 'zZz' order by p;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref a a 3 const 10 Using where; Using filesort
+1 SIMPLE t1 const a NULL NULL NULL 1
select * from t1 where a = 'aAa' order by p;
p a
1 aAa
diff --git a/mysql-test/r/ndb_condition_pushdown.result b/mysql-test/r/ndb_condition_pushdown.result
index 1c3da1b486f..449120c6c79 100644
--- a/mysql-test/r/ndb_condition_pushdown.result
+++ b/mysql-test/r/ndb_condition_pushdown.result
@@ -868,7 +868,7 @@ time_field = '01:01:01' and
date_time = '1901-01-01 01:01:01'
order by auto;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref medium_index medium_index 3 const 10 Using where with pushed condition; Using filesort
+1 SIMPLE t1 ref medium_index medium_index 3 const 1 Using where with pushed condition; Using filesort
select auto from t1 where
string = "aaaa" and
vstring = "aaaa" and
@@ -925,7 +925,7 @@ time_field != '01:01:01' and
date_time != '1901-01-01 01:01:01'
order by auto;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range medium_index medium_index 3 NULL 20 Using where with pushed condition; Using filesort
+1 SIMPLE t1 range medium_index medium_index 3 NULL 3 Using where with pushed condition; Using filesort
select auto from t1 where
string != "aaaa" and
vstring != "aaaa" and
@@ -984,7 +984,7 @@ time_field > '01:01:01' and
date_time > '1901-01-01 01:01:01'
order by auto;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range medium_index medium_index 3 NULL 10 Using where with pushed condition; Using filesort
+1 SIMPLE t1 range medium_index medium_index 3 NULL 3 Using where with pushed condition; Using filesort
select auto from t1 where
string > "aaaa" and
vstring > "aaaa" and
@@ -1043,7 +1043,7 @@ time_field >= '01:01:01' and
date_time >= '1901-01-01 01:01:01'
order by auto;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range medium_index medium_index 3 NULL 10 Using where with pushed condition; Using filesort
+1 SIMPLE t1 range medium_index medium_index 3 NULL 4 Using where with pushed condition; Using filesort
select auto from t1 where
string >= "aaaa" and
vstring >= "aaaa" and
@@ -1103,7 +1103,7 @@ time_field < '04:04:04' and
date_time < '1904-04-04 04:04:04'
order by auto;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range medium_index medium_index 3 NULL 10 Using where with pushed condition; Using filesort
+1 SIMPLE t1 range medium_index medium_index 3 NULL 3 Using where with pushed condition; Using filesort
select auto from t1 where
string < "dddd" and
vstring < "dddd" and
@@ -1162,7 +1162,7 @@ time_field <= '04:04:04' and
date_time <= '1904-04-04 04:04:04'
order by auto;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range medium_index medium_index 3 NULL 10 Using where with pushed condition; Using filesort
+1 SIMPLE t1 range medium_index medium_index 3 NULL 4 Using where with pushed condition; Using filesort
select auto from t1 where
string <= "dddd" and
vstring <= "dddd" and
@@ -1255,7 +1255,7 @@ select auto from t1 where
(date_time between '1901-01-01 01:01:01' and '1903-03-03 03:03:03')
order by auto;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range medium_index medium_index 3 NULL 10 Using where with pushed condition; Using filesort
+1 SIMPLE t1 range medium_index medium_index 3 NULL 3 Using where with pushed condition; Using filesort
select auto from t1 where
(string between "aaaa" and "cccc") and
(vstring between "aaaa" and "cccc") and
@@ -1358,7 +1358,7 @@ select auto from t1 where
(date_time not between '1901-01-01 01:01:01' and '1903-03-03 03:03:03')
order by auto;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range medium_index medium_index 3 NULL 20 Using where with pushed condition; Using filesort
+1 SIMPLE t1 range medium_index medium_index 3 NULL 1 Using where with pushed condition; Using filesort
select auto from t1 where
(string not between "aaaa" and "cccc") and
(vstring not between "aaaa" and "cccc") and
@@ -1462,7 +1462,7 @@ time_field in('01:01:01','03:03:03') and
date_time in('1901-01-01 01:01:01','1903-03-03 03:03:03')
order by auto;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range medium_index medium_index 3 NULL 20 Using where with pushed condition; Using filesort
+1 SIMPLE t1 range medium_index medium_index 3 NULL 2 Using where with pushed condition; Using filesort
select auto from t1 where
string in("aaaa","cccc") and
vstring in("aaaa","cccc") and
@@ -1514,7 +1514,7 @@ select auto from t1 where
'1901-01-01 01:01:01' in(date_time)
order by auto;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref medium_index medium_index 3 const 10 Using where with pushed condition; Using filesort
+1 SIMPLE t1 ref medium_index medium_index 3 const 1 Using where with pushed condition; Using filesort
select auto from t1 where
"aaaa" in(string) and
"aaaa" in(vstring) and
@@ -1565,7 +1565,7 @@ time_field not in('01:01:01','03:03:03') and
date_time not in('1901-01-01 01:01:01','1903-03-03 03:03:03')
order by auto;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range medium_index medium_index 3 NULL 30 Using where with pushed condition; Using filesort
+1 SIMPLE t1 range medium_index medium_index 3 NULL 2 Using where with pushed condition; Using filesort
select auto from t1 where
string not in("aaaa","cccc") and
vstring not in("aaaa","cccc") and
@@ -1738,7 +1738,7 @@ pk1 attr1 attr2 attr3 pk1 attr1 attr2 attr3 attr4
explain
select * from t4 where attr1 < 5 and attr2 > 9223372036854775803 and attr3 != 3 order by t4.pk1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t4 range attr1 attr1 4 NULL 10 Using where with pushed condition; Using filesort
+1 SIMPLE t4 range attr1 attr1 4 NULL 5 Using where with pushed condition; Using filesort
select * from t4 where attr1 < 5 and attr2 > 9223372036854775803 and attr3 != 3 order by t4.pk1;
pk1 attr1 attr2 attr3 attr4
2 2 9223372036854775804 2 c
@@ -1746,7 +1746,7 @@ pk1 attr1 attr2 attr3 attr4
explain
select * from t3,t4 where t4.attr1 > 1 and t4.attr2 = t3.attr2 and t4.attr3 < 5 order by t4.pk1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t4 range attr1 attr1 4 NULL 10 Using where with pushed condition; Using temporary; Using filesort
+1 SIMPLE t4 range attr1 attr1 4 NULL 4 Using where with pushed condition; Using temporary; Using filesort
1 SIMPLE t3 ALL NULL NULL NULL NULL 6 Using where
select * from t3,t4 where t4.attr1 > 1 and t4.attr2 = t3.attr2 and t4.attr3 < 5 order by t4.pk1;
pk1 attr1 attr2 attr3 attr4 pk1 attr1 attr2 attr3 attr4
diff --git a/mysql-test/r/ndb_index_ordered.result b/mysql-test/r/ndb_index_ordered.result
index 36bac7b0f9d..b4871180706 100644
--- a/mysql-test/r/ndb_index_ordered.result
+++ b/mysql-test/r/ndb_index_ordered.result
@@ -658,3 +658,141 @@ insert into t1 (a, c) values (1,'aaa'),(3,'bbb');
select count(*) from t1 where c<'bbb';
count(*)
1
+drop table t1;
+set autocommit=1;
+show session variables like 'ndb_index_stat_%';
+Variable_name Value
+ndb_index_stat_cache_entries 32
+ndb_index_stat_enable ON
+ndb_index_stat_update_freq 20
+set ndb_index_stat_enable = off;
+show session variables like 'ndb_index_stat_%';
+Variable_name Value
+ndb_index_stat_cache_entries 32
+ndb_index_stat_enable OFF
+ndb_index_stat_update_freq 20
+create table t1 (a int, b int, c varchar(10) not null,
+primary key using hash (a), index(b,c)) engine=ndb;
+insert into t1 values
+(1,10,'aaa'),(2,10,'bbb'),(3,10,'ccc'),
+(4,20,'aaa'),(5,20,'bbb'),(6,20,'ccc'),
+(7,30,'aaa'),(8,30,'bbb'),(9,30,'ccc');
+select count(*) from t1 where b < 10;
+count(*)
+0
+select count(*) from t1 where b >= 10 and c >= 'bbb';
+count(*)
+6
+select count(*) from t1 where b > 10;
+count(*)
+6
+select count(*) from t1 where b <= 20 and c < 'ccc';
+count(*)
+4
+select count(*) from t1 where b = 20 and c = 'ccc';
+count(*)
+1
+select count(*) from t1 where b > 20;
+count(*)
+3
+select count(*) from t1 where b = 30 and c > 'aaa';
+count(*)
+2
+select count(*) from t1 where b <= 20;
+count(*)
+6
+select count(*) from t1 where b >= 20 and c > 'aaa';
+count(*)
+4
+drop table t1;
+set ndb_index_stat_enable = on;
+set ndb_index_stat_cache_entries = 0;
+show session variables like 'ndb_index_stat_%';
+Variable_name Value
+ndb_index_stat_cache_entries 0
+ndb_index_stat_enable ON
+ndb_index_stat_update_freq 20
+create table t1 (a int, b int, c varchar(10) not null,
+primary key using hash (a), index(b,c)) engine=ndb;
+insert into t1 values
+(1,10,'aaa'),(2,10,'bbb'),(3,10,'ccc'),
+(4,20,'aaa'),(5,20,'bbb'),(6,20,'ccc'),
+(7,30,'aaa'),(8,30,'bbb'),(9,30,'ccc');
+select count(*) from t1 where b < 10;
+count(*)
+0
+select count(*) from t1 where b >= 10 and c >= 'bbb';
+count(*)
+6
+select count(*) from t1 where b > 10;
+count(*)
+6
+select count(*) from t1 where b <= 20 and c < 'ccc';
+count(*)
+4
+select count(*) from t1 where b = 20 and c = 'ccc';
+count(*)
+1
+select count(*) from t1 where b > 20;
+count(*)
+3
+select count(*) from t1 where b = 30 and c > 'aaa';
+count(*)
+2
+select count(*) from t1 where b <= 20;
+count(*)
+6
+select count(*) from t1 where b >= 20 and c > 'aaa';
+count(*)
+4
+drop table t1;
+set ndb_index_stat_enable = on;
+set ndb_index_stat_cache_entries = 4;
+set ndb_index_stat_update_freq = 2;
+show session variables like 'ndb_index_stat_%';
+Variable_name Value
+ndb_index_stat_cache_entries 4
+ndb_index_stat_enable ON
+ndb_index_stat_update_freq 2
+create table t1 (a int, b int, c varchar(10) not null,
+primary key using hash (a), index(b,c)) engine=ndb;
+insert into t1 values
+(1,10,'aaa'),(2,10,'bbb'),(3,10,'ccc'),
+(4,20,'aaa'),(5,20,'bbb'),(6,20,'ccc'),
+(7,30,'aaa'),(8,30,'bbb'),(9,30,'ccc');
+select count(*) from t1 where b < 10;
+count(*)
+0
+select count(*) from t1 where b >= 10 and c >= 'bbb';
+count(*)
+6
+select count(*) from t1 where b > 10;
+count(*)
+6
+select count(*) from t1 where b <= 20 and c < 'ccc';
+count(*)
+4
+select count(*) from t1 where b = 20 and c = 'ccc';
+count(*)
+1
+select count(*) from t1 where b > 20;
+count(*)
+3
+select count(*) from t1 where b = 30 and c > 'aaa';
+count(*)
+2
+select count(*) from t1 where b <= 20;
+count(*)
+6
+select count(*) from t1 where b >= 20 and c > 'aaa';
+count(*)
+4
+drop table t1;
+set ndb_index_stat_enable = @@global.ndb_index_stat_enable;
+set ndb_index_stat_cache_entries = @@global.ndb_index_stat_cache_entries;
+set ndb_index_stat_update_freq = @@global.ndb_index_stat_update_freq;
+show session variables like 'ndb_index_stat_%';
+Variable_name Value
+ndb_index_stat_cache_entries 32
+ndb_index_stat_enable ON
+ndb_index_stat_update_freq 20
diff --git a/mysql-test/t/ndb_index_ordered.test b/mysql-test/t/ndb_index_ordered.test
index e6827bdbe12..a03e0729ece 100644
--- a/mysql-test/t/ndb_index_ordered.test
+++ b/mysql-test/t/ndb_index_ordered.test
@@ -354,5 +354,79 @@ create table t1 (a int, c varchar(10),
primary key using hash (a), index(c)) engine=ndb;
insert into t1 (a, c) values (1,'aaa'),(3,'bbb');
select count(*) from t1 where c<'bbb';
+drop table t1;
+
+# -- index statistics --
+
+set autocommit=1;
+show session variables like 'ndb_index_stat_%';
+
+set ndb_index_stat_enable = off;
+show session variables like 'ndb_index_stat_%';
+
+create table t1 (a int, b int, c varchar(10) not null,
+ primary key using hash (a), index(b,c)) engine=ndb;
+insert into t1 values
+ (1,10,'aaa'),(2,10,'bbb'),(3,10,'ccc'),
+ (4,20,'aaa'),(5,20,'bbb'),(6,20,'ccc'),
+ (7,30,'aaa'),(8,30,'bbb'),(9,30,'ccc');
+select count(*) from t1 where b < 10;
+select count(*) from t1 where b >= 10 and c >= 'bbb';
+select count(*) from t1 where b > 10;
+select count(*) from t1 where b <= 20 and c < 'ccc';
+select count(*) from t1 where b = 20 and c = 'ccc';
+select count(*) from t1 where b > 20;
+select count(*) from t1 where b = 30 and c > 'aaa';
+select count(*) from t1 where b <= 20;
+select count(*) from t1 where b >= 20 and c > 'aaa';
+drop table t1;
+
+set ndb_index_stat_enable = on;
+set ndb_index_stat_cache_entries = 0;
+show session variables like 'ndb_index_stat_%';
+
+create table t1 (a int, b int, c varchar(10) not null,
+ primary key using hash (a), index(b,c)) engine=ndb;
+insert into t1 values
+ (1,10,'aaa'),(2,10,'bbb'),(3,10,'ccc'),
+ (4,20,'aaa'),(5,20,'bbb'),(6,20,'ccc'),
+ (7,30,'aaa'),(8,30,'bbb'),(9,30,'ccc');
+select count(*) from t1 where b < 10;
+select count(*) from t1 where b >= 10 and c >= 'bbb';
+select count(*) from t1 where b > 10;
+select count(*) from t1 where b <= 20 and c < 'ccc';
+select count(*) from t1 where b = 20 and c = 'ccc';
+select count(*) from t1 where b > 20;
+select count(*) from t1 where b = 30 and c > 'aaa';
+select count(*) from t1 where b <= 20;
+select count(*) from t1 where b >= 20 and c > 'aaa';
+drop table t1;
+
+set ndb_index_stat_enable = on;
+set ndb_index_stat_cache_entries = 4;
+set ndb_index_stat_update_freq = 2;
+show session variables like 'ndb_index_stat_%';
+
+create table t1 (a int, b int, c varchar(10) not null,
+ primary key using hash (a), index(b,c)) engine=ndb;
+insert into t1 values
+ (1,10,'aaa'),(2,10,'bbb'),(3,10,'ccc'),
+ (4,20,'aaa'),(5,20,'bbb'),(6,20,'ccc'),
+ (7,30,'aaa'),(8,30,'bbb'),(9,30,'ccc');
+select count(*) from t1 where b < 10;
+select count(*) from t1 where b >= 10 and c >= 'bbb';
+select count(*) from t1 where b > 10;
+select count(*) from t1 where b <= 20 and c < 'ccc';
+select count(*) from t1 where b = 20 and c = 'ccc';
+select count(*) from t1 where b > 20;
+select count(*) from t1 where b = 30 and c > 'aaa';
+select count(*) from t1 where b <= 20;
+select count(*) from t1 where b >= 20 and c > 'aaa';
+drop table t1;
+
+set ndb_index_stat_enable = @@global.ndb_index_stat_enable;
+set ndb_index_stat_cache_entries = @@global.ndb_index_stat_cache_entries;
+set ndb_index_stat_update_freq = @@global.ndb_index_stat_update_freq;
+show session variables like 'ndb_index_stat_%';
# End of 4.1 tests
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 1eef65df00f..cc1cec57a63 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -31,6 +31,7 @@
#include "ha_ndbcluster.h"
#include <ndbapi/NdbApi.hpp>
#include <ndbapi/NdbScanFilter.hpp>
+#include <ndbapi/NdbIndexStat.hpp>
// options from from mysqld.cc
extern my_bool opt_ndb_optimized_node_selection;
@@ -85,6 +86,14 @@ static handlerton ndbcluster_hton = {
DBUG_RETURN(ndb_to_mysql_error(&tmp)); \
}
+#define ERR_BREAK(err, code) \
+{ \
+ const NdbError& tmp= err; \
+ ERR_PRINT(tmp); \
+ code= ndb_to_mysql_error(&tmp); \
+ break; \
+}
+
// Typedefs for long names
typedef NdbDictionary::Column NDBCOL;
typedef NdbDictionary::Table NDBTAB;
@@ -1064,6 +1073,26 @@ int ha_ndbcluster::build_index_list(Ndb *ndb, TABLE *tab, enum ILBP phase)
const NDBINDEX *index= dict->getIndex(index_name, m_tabname);
if (!index) DBUG_RETURN(1);
m_index[i].index= (void *) index;
+ // ordered index - add stats
+ NDB_INDEX_DATA& d=m_index[i];
+ delete d.index_stat;
+ d.index_stat=NULL;
+ THD *thd=current_thd;
+ if (thd->variables.ndb_index_stat_enable)
+ {
+ d.index_stat=new NdbIndexStat(index);
+ d.index_stat_cache_entries=thd->variables.ndb_index_stat_cache_entries;
+ d.index_stat_update_freq=thd->variables.ndb_index_stat_update_freq;
+ d.index_stat_query_count=0;
+ d.index_stat->alloc_cache(d.index_stat_cache_entries);
+ DBUG_PRINT("info", ("index %s stat=on cache_entries=%u update_freq=%u",
+ index->getName(),
+ d.index_stat_cache_entries,
+ d.index_stat_update_freq));
+ } else
+ {
+ DBUG_PRINT("info", ("index %s stat=off", index->getName()));
+ }
}
if (idx_type == UNIQUE_ORDERED_INDEX || idx_type == UNIQUE_INDEX)
{
@@ -1135,6 +1164,8 @@ void ha_ndbcluster::release_metadata()
my_free((char *)m_index[i].unique_index_attrid_map, MYF(0));
m_index[i].unique_index_attrid_map= NULL;
}
+ delete m_index[i].index_stat;
+ m_index[i].index_stat=NULL;
}
DBUG_VOID_RETURN;
@@ -1648,10 +1679,12 @@ inline int ha_ndbcluster::next_result(byte *buf)
*/
int ha_ndbcluster::set_bounds(NdbIndexScanOperation *op,
+ uint inx,
+ bool rir,
const key_range *keys[2],
uint range_no)
{
- const KEY *const key_info= table->key_info + active_index;
+ const KEY *const key_info= table->key_info + inx;
const uint key_parts= key_info->key_parts;
uint key_tot_len[2];
uint tot_len;
@@ -1716,7 +1749,10 @@ int ha_ndbcluster::set_bounds(NdbIndexScanOperation *op,
switch (p.key->flag)
{
case HA_READ_KEY_EXACT:
- p.bound_type= NdbIndexScanOperation::BoundEQ;
+ if (! rir)
+ p.bound_type= NdbIndexScanOperation::BoundEQ;
+ else // differs for records_in_range
+ p.bound_type= NdbIndexScanOperation::BoundLE;
break;
// ascending
case HA_READ_KEY_OR_NEXT:
@@ -1874,7 +1910,7 @@ int ha_ndbcluster::ordered_index_scan(const key_range *start_key,
{
const key_range *keys[2]= { start_key, end_key };
- res= set_bounds(op, keys);
+ res= set_bounds(op, active_index, false, keys);
if (res)
DBUG_RETURN(res);
}
@@ -4162,6 +4198,10 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg):
m_index[i].unique_index= NULL;
m_index[i].index= NULL;
m_index[i].unique_index_attrid_map= NULL;
+ m_index[i].index_stat=NULL;
+ m_index[i].index_stat_cache_entries=0;
+ m_index[i].index_stat_update_freq=0;
+ m_index[i].index_stat_query_count=0;
}
DBUG_VOID_RETURN;
@@ -4914,6 +4954,84 @@ ha_ndbcluster::records_in_range(uint inx, key_range *min_key,
(max_key && max_key->length == key_length)))
DBUG_RETURN(1);
+ if ((idx_type == PRIMARY_KEY_ORDERED_INDEX ||
+ idx_type == UNIQUE_ORDERED_INDEX ||
+ idx_type == ORDERED_INDEX) &&
+ m_index[inx].index_stat != NULL)
+ {
+ NDB_INDEX_DATA& d=m_index[inx];
+ NDBINDEX* index=(NDBINDEX*)d.index;
+ Ndb* ndb=get_ndb();
+ NdbTransaction* trans=NULL;
+ NdbIndexScanOperation* op=NULL;
+ int res=0;
+ Uint64 rows;
+
+ do
+ {
+ // We must provide approx table rows
+ Uint64 table_rows=0;
+ Ndb_local_table_statistics *info=
+ (Ndb_local_table_statistics *)m_table_info;
+ if (info->records != ~(ha_rows)0 && info->records != 0)
+ {
+ table_rows = info->records;
+ DBUG_PRINT("info", ("use info->records: %llu", table_rows));
+ }
+ else
+ {
+ Ndb_statistics stat;
+ if ((res=ndb_get_table_statistics(ndb, m_tabname, &stat)) != 0)
+ break;
+ table_rows=stat.row_count;
+ DBUG_PRINT("info", ("use db row_count: %llu", table_rows));
+ if (table_rows == 0) {
+ // Problem if autocommit=0
+#ifdef ndb_get_table_statistics_uses_active_trans
+ rows=0;
+ break;
+#endif
+ }
+ }
+
+ // Define scan op for the range
+ if ((trans=m_active_trans) == NULL)
+ {
+ DBUG_PRINT("info", ("no active trans"));
+ if (! (trans=ndb->startTransaction()))
+ ERR_BREAK(ndb->getNdbError(), res);
+ }
+ if (! (op=trans->getNdbIndexScanOperation(index, (NDBTAB*)m_table)))
+ ERR_BREAK(trans->getNdbError(), res);
+ if ((op->readTuples(NdbOperation::LM_CommittedRead)) == -1)
+ ERR_BREAK(op->getNdbError(), res);
+ const key_range *keys[2]={ min_key, max_key };
+ if ((res=set_bounds(op, inx, true, keys)) != 0)
+ break;
+
+ // Decide if db should be contacted
+ int flags=0;
+ if (d.index_stat_query_count < d.index_stat_cache_entries ||
+ (d.index_stat_update_freq != 0 &&
+ d.index_stat_query_count % d.index_stat_update_freq == 0))
+ {
+ DBUG_PRINT("info", ("force stat from db"));
+ flags|=NdbIndexStat::RR_UseDb;
+ }
+ if (d.index_stat->records_in_range(index, op, table_rows, &rows, flags) == -1)
+ ERR_BREAK(d.index_stat->getNdbError(), res);
+ d.index_stat_query_count++;
+ } while (0);
+
+ if (trans != m_active_trans && rows == 0)
+ rows = 1;
+ if (trans != m_active_trans && trans != NULL)
+ ndb->closeTransaction(trans);
+ if (res != 0)
+ DBUG_RETURN(HA_POS_ERROR);
+ DBUG_RETURN(rows);
+ }
+
DBUG_RETURN(10); /* Good guess when you don't know anything */
}
@@ -5612,7 +5730,8 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
const key_range *keys[2]= { &multi_range_curr->start_key,
&multi_range_curr->end_key };
- if ((res= set_bounds(scanOp, keys, multi_range_curr-ranges)))
+ if ((res= set_bounds(scanOp, active_index, false, keys,
+ multi_range_curr-ranges)))
DBUG_RETURN(res);
break;
}
diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h
index bf9891c364b..0d99e9c1220 100644
--- a/sql/ha_ndbcluster.h
+++ b/sql/ha_ndbcluster.h
@@ -35,6 +35,7 @@ class NdbScanOperation;
class NdbScanFilter;
class NdbIndexScanOperation;
class NdbBlob;
+class NdbIndexStat;
// connectstring to cluster if given by mysqld
extern const char *ndbcluster_connectstring;
@@ -54,6 +55,12 @@ typedef struct ndb_index_data {
void *index;
void *unique_index;
unsigned char *unique_index_attrid_map;
+ // In this version stats are not shared between threads
+ NdbIndexStat* index_stat;
+ uint index_stat_cache_entries;
+ // Simple counter mechanism to decide when to connect to db
+ uint index_stat_update_freq;
+ uint index_stat_query_count;
} NDB_INDEX_DATA;
typedef struct st_ndbcluster_share {
@@ -642,7 +649,8 @@ private:
int get_ndb_blobs_value(NdbBlob *last_ndb_blob);
int set_primary_key(NdbOperation *op, const byte *key);
int set_primary_key_from_record(NdbOperation *op, const byte *record);
- int set_bounds(NdbIndexScanOperation*, const key_range *keys[2], uint= 0);
+ int set_bounds(NdbIndexScanOperation*, uint inx, bool rir,
+ const key_range *keys[2], uint= 0);
int key_cmp(uint keynr, const byte * old_row, const byte * new_row);
int set_index_key(NdbOperation *, const KEY *key_info, const byte *key_ptr);
void print_results();
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index aaed7b64377..6f56a11a8b6 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -4335,6 +4335,8 @@ enum options_mysqld
OPT_NDB_SHM, OPT_NDB_OPTIMIZED_NODE_SELECTION, OPT_NDB_CACHE_CHECK_TIME,
OPT_NDB_MGMD, OPT_NDB_NODEID,
OPT_NDB_DISTRIBUTION,
+ OPT_NDB_INDEX_STAT_ENABLE,
+ OPT_NDB_INDEX_STAT_CACHE_ENTRIES, OPT_NDB_INDEX_STAT_UPDATE_FREQ,
OPT_SKIP_SAFEMALLOC,
OPT_TEMP_POOL, OPT_TX_ISOLATION, OPT_COMPLETION_TYPE,
OPT_SKIP_STACK_TRACE, OPT_SKIP_SYMLINKS,
@@ -4949,6 +4951,23 @@ Disable with --skip-ndbcluster (will save memory).",
"A dedicated thread is created to, at the given millisecons interval, invalidate the query cache if another MySQL server in the cluster has changed the data in the database.",
(gptr*) &opt_ndb_cache_check_time, (gptr*) &opt_ndb_cache_check_time, 0, GET_ULONG, REQUIRED_ARG,
0, 0, LONG_TIMEOUT, 0, 1, 0},
+ {"ndb-index-stat-enable", OPT_NDB_INDEX_STAT_ENABLE,
+ "Use ndb index statistics in query optimization.",
+ (gptr*) &global_system_variables.ndb_index_stat_enable,
+ (gptr*) &max_system_variables.ndb_index_stat_enable,
+ 0, GET_BOOL, OPT_ARG, 1, 0, 1, 0, 0, 0},
+ {"ndb-index-stat-cache-entries", OPT_NDB_INDEX_STAT_CACHE_ENTRIES,
+ "Number of start/end keys to store in statistics memory cache."
+ " Zero means no cache and forces query of db nodes always.",
+ (gptr*) &global_system_variables.ndb_index_stat_cache_entries,
+ (gptr*) &max_system_variables.ndb_index_stat_cache_entries,
+ 0, GET_ULONG, OPT_ARG, 32, 0, ~0L, 0, 0, 0},
+ {"ndb-index-stat-update-freq", OPT_NDB_INDEX_STAT_UPDATE_FREQ,
+ "How often, in the long run, to query db nodes instead of statistics cache."
+ " For example 20 means every 20th time.",
+ (gptr*) &global_system_variables.ndb_index_stat_update_freq,
+ (gptr*) &max_system_variables.ndb_index_stat_update_freq,
+ 0, GET_ULONG, OPT_ARG, 20, 0, ~0L, 0, 0, 0},
#endif
{"new", 'n', "Use very new possible 'unsafe' functions.",
(gptr*) &global_system_variables.new_mode,
@@ -6211,6 +6230,12 @@ static void mysql_init_variables(void)
#endif
#ifdef HAVE_NDBCLUSTER_DB
have_ndbcluster=SHOW_OPTION_DISABLED;
+ global_system_variables.ndb_index_stat_enable=TRUE;
+ max_system_variables.ndb_index_stat_enable=TRUE;
+ global_system_variables.ndb_index_stat_cache_entries=32;
+ max_system_variables.ndb_index_stat_cache_entries=~0L;
+ global_system_variables.ndb_index_stat_update_freq=20;
+ max_system_variables.ndb_index_stat_update_freq=~0L;
#else
have_ndbcluster=SHOW_OPTION_NO;
#endif
diff --git a/sql/set_var.cc b/sql/set_var.cc
index 4fe2570829e..e7adc7387c0 100644
--- a/sql/set_var.cc
+++ b/sql/set_var.cc
@@ -433,6 +433,15 @@ sys_var_thd_bool
sys_ndb_use_transactions("ndb_use_transactions", &SV::ndb_use_transactions);
sys_var_long_ptr
sys_ndb_cache_check_time("ndb_cache_check_time", &ndb_cache_check_time);
+sys_var_thd_bool
+sys_ndb_index_stat_enable("ndb_index_stat_enable",
+ &SV::ndb_index_stat_enable);
+sys_var_thd_ulong
+sys_ndb_index_stat_cache_entries("ndb_index_stat_cache_entries",
+ &SV::ndb_index_stat_cache_entries);
+sys_var_thd_ulong
+sys_ndb_index_stat_update_freq("ndb_index_stat_update_freq",
+ &SV::ndb_index_stat_update_freq);
#endif
/* Time/date/datetime formats */
@@ -725,6 +734,9 @@ sys_var *sys_variables[]=
&sys_ndb_force_send,
&sys_ndb_use_exact_count,
&sys_ndb_use_transactions,
+ &sys_ndb_index_stat_enable,
+ &sys_ndb_index_stat_cache_entries,
+ &sys_ndb_index_stat_update_freq,
#endif
&sys_unique_checks,
&sys_updatable_views_with_limit,
@@ -906,10 +918,13 @@ struct show_var_st init_vars[]= {
#ifdef HAVE_NDBCLUSTER_DB
{sys_ndb_autoincrement_prefetch_sz.name,
(char*) &sys_ndb_autoincrement_prefetch_sz, SHOW_SYS},
+ {sys_ndb_cache_check_time.name,(char*) &sys_ndb_cache_check_time, SHOW_SYS},
{sys_ndb_force_send.name, (char*) &sys_ndb_force_send, SHOW_SYS},
+ {sys_ndb_index_stat_cache_entries.name, (char*) &sys_ndb_index_stat_cache_entries, SHOW_SYS},
+ {sys_ndb_index_stat_enable.name, (char*) &sys_ndb_index_stat_enable, SHOW_SYS},
+ {sys_ndb_index_stat_update_freq.name, (char*) &sys_ndb_index_stat_update_freq, SHOW_SYS},
{sys_ndb_use_exact_count.name,(char*) &sys_ndb_use_exact_count, SHOW_SYS},
{sys_ndb_use_transactions.name,(char*) &sys_ndb_use_transactions, SHOW_SYS},
- {sys_ndb_cache_check_time.name,(char*) &sys_ndb_cache_check_time, SHOW_SYS},
#endif
{sys_net_buffer_length.name,(char*) &sys_net_buffer_length, SHOW_SYS},
{sys_net_read_timeout.name, (char*) &sys_net_read_timeout, SHOW_SYS},
diff --git a/sql/sql_class.h b/sql/sql_class.h
index dbc3c1a184f..59b51a702aa 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -565,6 +565,9 @@ struct system_variables
my_bool ndb_force_send;
my_bool ndb_use_exact_count;
my_bool ndb_use_transactions;
+ my_bool ndb_index_stat_enable;
+ ulong ndb_index_stat_cache_entries;
+ ulong ndb_index_stat_update_freq;
#endif /* HAVE_NDBCLUSTER_DB */
my_bool old_alter_table;
my_bool old_passwords;
diff --git a/storage/ndb/include/kernel/AttributeHeader.hpp b/storage/ndb/include/kernel/AttributeHeader.hpp
index 7d89219b8b2..21b13472c37 100644
--- a/storage/ndb/include/kernel/AttributeHeader.hpp
+++ b/storage/ndb/include/kernel/AttributeHeader.hpp
@@ -41,6 +41,8 @@ public:
STATIC_CONST( ROW_SIZE = 0xFFFA );
STATIC_CONST( FRAGMENT_MEMORY= 0xFFF9 );
+ STATIC_CONST( RECORDS_IN_RANGE = 0xFFF8 );
+
/** Initialize AttributeHeader at location aHeaderPtr */
static AttributeHeader& init(void* aHeaderPtr, Uint32 anAttributeId,
Uint32 aDataSize);
diff --git a/storage/ndb/include/ndbapi/NdbDictionary.hpp b/storage/ndb/include/ndbapi/NdbDictionary.hpp
index 870af671959..00fe709677f 100644
--- a/storage/ndb/include/ndbapi/NdbDictionary.hpp
+++ b/storage/ndb/include/ndbapi/NdbDictionary.hpp
@@ -456,6 +456,7 @@ public:
static const Column * COMMIT_COUNT;
static const Column * ROW_SIZE;
static const Column * RANGE_NO;
+ static const Column * RECORDS_IN_RANGE;
int getSizeInBytes() const;
#endif
@@ -929,6 +930,7 @@ public:
private:
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
friend class NdbIndexImpl;
+ friend class NdbIndexStat;
#endif
class NdbIndexImpl & m_impl;
Index(NdbIndexImpl&);
diff --git a/storage/ndb/include/ndbapi/NdbIndexScanOperation.hpp b/storage/ndb/include/ndbapi/NdbIndexScanOperation.hpp
index c231b927581..fd7e9f2d05c 100644
--- a/storage/ndb/include/ndbapi/NdbIndexScanOperation.hpp
+++ b/storage/ndb/include/ndbapi/NdbIndexScanOperation.hpp
@@ -30,6 +30,7 @@ class NdbIndexScanOperation : public NdbScanOperation {
friend class NdbResultSet;
friend class NdbOperation;
friend class NdbScanOperation;
+ friend class NdbIndexStat;
#endif
public:
@@ -149,12 +150,14 @@ public:
* Is current scan sorted descending
*/
bool getDescending() const { return m_descending; }
+
private:
NdbIndexScanOperation(Ndb* aNdb);
virtual ~NdbIndexScanOperation();
int setBound(const NdbColumnImpl*, int type, const void* aValue, Uint32 len);
int insertBOUNDS(Uint32 * data, Uint32 sz);
+ Uint32 getKeyFromSCANTABREQ(Uint32* data, Uint32 size);
virtual int equal_impl(const NdbColumnImpl*, const char*, Uint32);
virtual NdbRecAttr* getValue_impl(const NdbColumnImpl*, char*);
diff --git a/storage/ndb/include/ndbapi/NdbIndexStat.hpp b/storage/ndb/include/ndbapi/NdbIndexStat.hpp
new file mode 100644
index 00000000000..7666166b657
--- /dev/null
+++ b/storage/ndb/include/ndbapi/NdbIndexStat.hpp
@@ -0,0 +1,141 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef NdbIndexStat_H
+#define NdbIndexStat_H
+
+#include <ndb_global.h>
+#include <NdbDictionary.hpp>
+#include <NdbError.hpp>
+class NdbIndexImpl;
+class NdbIndexScanOperation;
+
+/*
+ * Statistics for an ordered index.
+ */
+class NdbIndexStat {
+public:
+ NdbIndexStat(const NdbDictionary::Index* index);
+ ~NdbIndexStat();
+ /*
+ * Allocate memory for cache. Argument is minimum number of stat
+ * entries and applies to lower and upper bounds separately. More
+ * entries may fit (keys have variable size). If not used, db is
+ * contacted always.
+ */
+ int alloc_cache(Uint32 entries);
+ /*
+ * Flags for records_in_range.
+ */
+ enum {
+ RR_UseDb = 1, // contact db
+ RR_NoUpdate = 2 // but do not update cache
+ };
+ /*
+ * Estimate how many index records need to be scanned. The scan
+ * operation must be prepared with lock mode LM_CommittedRead and must
+ * have the desired bounds set. The routine may use local cache or
+ * may contact db by executing the operation.
+ *
+ * If returned count is zero then db was contacted and the count is
+ * exact. Otherwise the count is approximate. If cache is used then
+ * caller must provide estimated number of table rows. It will be
+ * multiplied by a percentage obtained from the cache (result zero is
+ * returned as 1).
+ */
+ int records_in_range(NdbDictionary::Index* index,
+ NdbIndexScanOperation* op,
+ Uint64 table_rows,
+ Uint64* count,
+ int flags);
+ /*
+ * Get latest error.
+ */
+ const NdbError& getNdbError() const;
+
+private:
+ /*
+ * There are 2 areas: start keys and end keys. An area has pointers
+ * at beginning and entries at end. Pointers are sorted by key.
+ *
+ * A pointer contains entry offset and also entry timestamp. An entry
+ * contains the key and percentage of rows _not_ satisfying the bound
+ * i.e. less than start key or greater than end key.
+ *
+ * A key is an array of index key bounds. Each has type (0-4) in
+ * first word followed by data with AttributeHeader.
+ *
+ * Stat update comes as pair of start and end key and associated
+ * percentages. Stat query takes best match of start and end key from
+ * each area separately. Rows in range percentage is then computed by
+ * excluding the two i.e. as 100 - (start key pct + end key pct).
+ *
+ * TODO use more compact key format
+ */
+ friend struct Area;
+ struct Pointer {
+ Uint16 m_pos;
+ Uint16 m_seq;
+ };
+ struct Entry {
+ float m_pct;
+ Uint32 m_keylen;
+ };
+ STATIC_CONST( EntrySize = sizeof(Entry) >> 2 );
+ STATIC_CONST( PointerSize = sizeof(Pointer) >> 2 );
+ struct Area {
+ Uint32* m_data;
+ Uint32 m_offset;
+ Uint32 m_free;
+ Uint16 m_entries;
+ Uint8 m_idir;
+ Uint8 pad1;
+ Pointer& get_pointer(unsigned i) const {
+ return *(Pointer*)&m_data[i];
+ }
+ Entry& get_entry(unsigned i) const {
+ return *(Entry*)&m_data[get_pointer(i).m_pos];
+ }
+ Uint32 get_pos(const Entry& e) const {
+ return (const Uint32*)&e - m_data;
+ }
+ unsigned get_firstpos() const {
+ return PointerSize * m_entries + m_free;
+ }
+ };
+ const NdbIndexImpl& m_index;
+ Uint32 m_areasize;
+ Uint16 m_seq;
+ Area m_area[2];
+ Uint32* m_cache;
+ NdbError m_error;
+#ifdef VM_TRACE
+ void stat_verify();
+#endif
+ int stat_cmpkey(const Area& a, const Uint32* key1, Uint32 keylen1,
+ const Uint32* key2, Uint32 keylen2);
+ int stat_search(const Area& a, const Uint32* key, Uint32 keylen,
+ Uint32* idx, bool* match);
+ int stat_oldest(const Area& a);
+ int stat_delete(Area& a, Uint32 k);
+ int stat_update(const Uint32* key1, Uint32 keylen1,
+ const Uint32* key2, Uint32 keylen2, const float pct[2]);
+ int stat_select(const Uint32* key1, Uint32 keylen1,
+ const Uint32* key2, Uint32 keylen2, float pct[2]);
+ void set_error(int code);
+};
+
+#endif
diff --git a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
index 538850c4fb1..42688796801 100644
--- a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
+++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
@@ -2622,7 +2622,10 @@ Dblqh::execREAD_PSEUDO_REQ(Signal* signal){
regTcPtr.i = signal->theData[0];
ptrCheckGuard(regTcPtr, ctcConnectrecFileSize, tcConnectionrec);
- if(signal->theData[1] != AttributeHeader::RANGE_NO)
+ if (signal->theData[1] == AttributeHeader::RANGE_NO) {
+ signal->theData[0] = regTcPtr.p->m_scan_curr_range_no;
+ }
+ else if (signal->theData[1] != AttributeHeader::RECORDS_IN_RANGE)
{
jam();
FragrecordPtr regFragptr;
@@ -2634,7 +2637,13 @@ Dblqh::execREAD_PSEUDO_REQ(Signal* signal){
}
else
{
- signal->theData[0] = regTcPtr.p->m_scan_curr_range_no;
+ jam();
+ // scanptr gets reset somewhere within the timeslice
+ ScanRecordPtr tmp;
+ tmp.i = regTcPtr.p->tcScanRec;
+ c_scanRecordPool.getPtr(tmp);
+ signal->theData[0] = tmp.p->scanAccPtr;
+ EXECUTE_DIRECT(DBTUX, GSN_READ_PSEUDO_REQ, signal, 2);
}
}
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp
index 535ff50bcd5..df3f06f4594 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp
@@ -1028,6 +1028,18 @@ Dbtup::read_pseudo(Uint32 attrId, Uint32* outBuffer){
EXECUTE_DIRECT(DBLQH, GSN_READ_PSEUDO_REQ, signal, 2);
outBuffer[0] = signal->theData[0];
return 1;
+
+ case AttributeHeader::RECORDS_IN_RANGE:
+ signal->theData[0] = operPtr.p->userpointer;
+ signal->theData[1] = attrId;
+
+ EXECUTE_DIRECT(DBLQH, GSN_READ_PSEUDO_REQ, signal, 2);
+ outBuffer[0] = signal->theData[0];
+ outBuffer[1] = signal->theData[1];
+ outBuffer[2] = signal->theData[2];
+ outBuffer[3] = signal->theData[3];
+ return 4;
+
default:
return 0;
}
diff --git a/storage/ndb/src/kernel/blocks/dbtux/Dbtux.hpp b/storage/ndb/src/kernel/blocks/dbtux/Dbtux.hpp
index d4a44b9e641..b34fd5151c2 100644
--- a/storage/ndb/src/kernel/blocks/dbtux/Dbtux.hpp
+++ b/storage/ndb/src/kernel/blocks/dbtux/Dbtux.hpp
@@ -82,10 +82,14 @@
#define jam() jamLine(80000 + __LINE__)
#define jamEntry() jamEntryLine(80000 + __LINE__)
#endif
-#ifdef DBTUX_DEBUG_CPP
+#ifdef DBTUX_STAT_CPP
#define jam() jamLine(90000 + __LINE__)
#define jamEntry() jamEntryLine(90000 + __LINE__)
#endif
+#ifdef DBTUX_DEBUG_CPP
+#define jam() jamLine(100000 + __LINE__)
+#define jamEntry() jamEntryLine(100000 + __LINE__)
+#endif
#ifndef jam
#define jam() jamLine(__LINE__)
#define jamEntry() jamEntryLine(__LINE__)
@@ -116,6 +120,7 @@ private:
STATIC_CONST( MaxPrefSize = MAX_TTREE_PREF_SIZE );
STATIC_CONST( ScanBoundSegmentSize = 7 );
STATIC_CONST( MaxAccLockOps = MAX_PARALLEL_OP_PER_SCAN );
+ STATIC_CONST( MaxTreeDepth = 32 ); // strict
BLOCK_DEFINES(Dbtux);
// forward declarations
@@ -269,6 +274,7 @@ private:
Uint8 m_prefSize; // words in min prefix
Uint8 m_minOccup; // min entries in internal node
Uint8 m_maxOccup; // max entries in node
+ Uint32 m_entryCount; // stat: current entries
TupLoc m_root; // root node
TreeHead();
// methods
@@ -660,6 +666,14 @@ private:
int cmpScanBound(const Frag& frag, unsigned dir, ConstData boundInfo, unsigned boundCount, ConstData entryData, unsigned maxlen = MaxAttrDataSize);
/*
+ * DbtuxStat.cpp
+ */
+ void execREAD_PSEUDO_REQ(Signal* signal);
+ void statRecordsInRange(ScanOpPtr scanPtr, Uint32* out);
+ Uint32 getEntriesBeforeOrAfter(Frag& frag, TreePos pos, unsigned idir);
+ unsigned getPathToNode(NodeHandle node, Uint16* path);
+
+ /*
* DbtuxDebug.cpp
*/
void execDUMP_STATE_ORD(Signal* signal);
@@ -952,6 +966,7 @@ Dbtux::TreeHead::TreeHead() :
m_prefSize(0),
m_minOccup(0),
m_maxOccup(0),
+ m_entryCount(0),
m_root()
{
}
diff --git a/storage/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp b/storage/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp
index 5640fdf2899..7c7d762d1e9 100644
--- a/storage/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp
@@ -66,6 +66,10 @@ Dbtux::Dbtux(const Configuration& conf) :
addRecSignal(GSN_ACCKEYREF, &Dbtux::execACCKEYREF);
addRecSignal(GSN_ACC_ABORTCONF, &Dbtux::execACC_ABORTCONF);
/*
+ * DbtuxStat.cpp
+ */
+ addRecSignal(GSN_READ_PSEUDO_REQ, &Dbtux::execREAD_PSEUDO_REQ);
+ /*
* DbtuxDebug.cpp
*/
addRecSignal(GSN_DUMP_STATE_ORD, &Dbtux::execDUMP_STATE_ORD);
diff --git a/storage/ndb/src/kernel/blocks/dbtux/DbtuxStat.cpp b/storage/ndb/src/kernel/blocks/dbtux/DbtuxStat.cpp
new file mode 100644
index 00000000000..23fb409b63c
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbtux/DbtuxStat.cpp
@@ -0,0 +1,160 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#define DBTUX_STAT_CPP
+#include "Dbtux.hpp"
+
+void
+Dbtux::execREAD_PSEUDO_REQ(Signal* signal)
+{
+ jamEntry();
+ ScanOpPtr scanPtr;
+ scanPtr.i = signal->theData[0];
+ c_scanOpPool.getPtr(scanPtr);
+ if (signal->theData[1] == AttributeHeader::RECORDS_IN_RANGE) {
+ jam();
+ statRecordsInRange(scanPtr, &signal->theData[0]);
+ } else {
+ ndbassert(false);
+ }
+}
+
+/*
+ * Estimate entries in range. Scan is at first entry. Search for last
+ * entry i.e. start of descending scan. Use the 2 positions to estimate
+ * entries before and after the range. Finally get entries in range by
+ * subtracting from total. Errors come from imperfectly balanced tree
+ * and from uncommitted entries which differ only in tuple version.
+ *
+ * Returns 4 Uint32 values: 0) total entries 1) in range 2) before range
+ * 3) after range. 1-3) are estimates and need not add up to 0).
+ */
+void
+Dbtux::statRecordsInRange(ScanOpPtr scanPtr, Uint32* out)
+{
+ ScanOp& scan = *scanPtr.p;
+ Frag& frag = *c_fragPool.getPtr(scan.m_fragPtrI);
+ TreeHead& tree = frag.m_tree;
+ // get first and last position
+ TreePos pos1 = scan.m_scanPos;
+ TreePos pos2;
+ { // as in scanFirst()
+ TreeHead& tree = frag.m_tree;
+ setKeyAttrs(frag);
+ const unsigned idir = 1;
+ const ScanBound& bound = *scan.m_bound[idir];
+ ScanBoundIterator iter;
+ bound.first(iter);
+ for (unsigned j = 0; j < bound.getSize(); j++) {
+ jam();
+ c_dataBuffer[j] = *iter.data;
+ bound.next(iter);
+ }
+ searchToScan(frag, c_dataBuffer, scan.m_boundCnt[idir], true, pos2);
+ // committed read (same timeslice) and range not empty
+ ndbrequire(pos2.m_loc != NullTupLoc);
+ }
+ out[0] = frag.m_tree.m_entryCount;
+ out[2] = getEntriesBeforeOrAfter(frag, pos1, 0);
+ out[3] = getEntriesBeforeOrAfter(frag, pos2, 1);
+ if (pos1.m_loc == pos2.m_loc) {
+ ndbrequire(pos2.m_pos >= pos1.m_pos);
+ out[1] = pos2.m_pos - pos1.m_pos + 1;
+ } else {
+ Uint32 rem = out[2] + out[3];
+ if (out[0] > rem) {
+ out[1] = out[0] - rem;
+ } else {
+ // random guess one node apart
+ out[1] = tree.m_maxOccup;
+ }
+ }
+}
+
+/*
+ * Estimate number of entries strictly before or after given position.
+ * Each branch to right direction wins parent node and the subtree on
+ * the other side. Subtree entries is estimated from depth and total
+ * entries by assuming that the tree is perfectly balanced.
+ */
+Uint32
+Dbtux::getEntriesBeforeOrAfter(Frag& frag, TreePos pos, unsigned idir)
+{
+ NodeHandle node(frag);
+ selectNode(node, pos.m_loc);
+ Uint16 path[MaxTreeDepth + 1];
+ unsigned depth = getPathToNode(node, path);
+ ndbrequire(depth != 0 && depth <= MaxTreeDepth);
+ TreeHead& tree = frag.m_tree;
+ Uint32 cnt = 0;
+ Uint32 tot = tree.m_entryCount;
+ unsigned i = 0;
+ // contribution from levels above
+ while (i + 1 < depth) {
+ unsigned occup2 = (path[i] >> 8);
+ unsigned side = (path[i + 1] & 0xFF);
+ // subtree of this node has about half the entries
+ tot = tot >= occup2 ? (tot - occup2) / 2 : 0;
+ // branch to other side wins parent and a subtree
+ if (side != idir) {
+ cnt += occup2;
+ cnt += tot;
+ }
+ i++;
+ }
+ // contribution from this node
+ unsigned occup = (path[i] >> 8);
+ ndbrequire(pos.m_pos < occup);
+ if (idir == 0) {
+ if (pos.m_pos != 0)
+      cnt += pos.m_pos - 1; // NOTE(review): strictly-before count at index m_pos is m_pos, not m_pos - 1 — possible off-by-one, cf. idir == 1 branch
+ } else {
+ cnt += occup - (pos.m_pos + 1);
+ }
+ // contribution from levels below
+ tot = tot >= occup ? (tot - occup) / 2 : 0;
+ cnt += tot;
+ return cnt;
+}
+
+/*
+ * Construct path to given node. Returns depth. Root node has path
+ * 2 and depth 1. In general the path is 2{0,1}* where 0,1 is the side
+ * (left,right branch). In addition the occupancy of each node is
+ * returned in the upper 8 bits.
+ */
+unsigned
+Dbtux::getPathToNode(NodeHandle node, Uint16* path)
+{
+ TupLoc loc = node.m_loc;
+ unsigned i = MaxTreeDepth;
+ while (loc != NullTupLoc) {
+ jam();
+ selectNode(node, loc);
+ path[i] = node.getSide() | (node.getOccup() << 8);
+ loc = node.getLink(2);
+ ndbrequire(i != 0);
+ i--;
+ }
+ unsigned depth = MaxTreeDepth - i;
+ unsigned j = 0;
+ while (j < depth) {
+ path[j] = path[i + 1 + j];
+ j++;
+ }
+ path[j] = 0xFFFF; // catch bug
+ return depth;
+}
diff --git a/storage/ndb/src/kernel/blocks/dbtux/DbtuxTree.cpp b/storage/ndb/src/kernel/blocks/dbtux/DbtuxTree.cpp
index 5107a8d8e31..cc2725c4d89 100644
--- a/storage/ndb/src/kernel/blocks/dbtux/DbtuxTree.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtux/DbtuxTree.cpp
@@ -26,25 +26,29 @@ Dbtux::treeAdd(Frag& frag, TreePos treePos, TreeEnt ent)
{
TreeHead& tree = frag.m_tree;
NodeHandle node(frag);
- if (treePos.m_loc != NullTupLoc) {
- // non-empty tree
- jam();
- selectNode(node, treePos.m_loc);
- unsigned pos = treePos.m_pos;
- if (node.getOccup() < tree.m_maxOccup) {
- // node has room
+ do {
+ if (treePos.m_loc != NullTupLoc) {
+ // non-empty tree
jam();
- nodePushUp(node, pos, ent, RNIL);
- return;
+ selectNode(node, treePos.m_loc);
+ unsigned pos = treePos.m_pos;
+ if (node.getOccup() < tree.m_maxOccup) {
+ // node has room
+ jam();
+ nodePushUp(node, pos, ent, RNIL);
+ break;
+ }
+ treeAddFull(frag, node, pos, ent);
+ break;
}
- treeAddFull(frag, node, pos, ent);
- return;
- }
- jam();
- insertNode(node);
- nodePushUp(node, 0, ent, RNIL);
- node.setSide(2);
- tree.m_root = node.m_loc;
+ jam();
+ insertNode(node);
+ nodePushUp(node, 0, ent, RNIL);
+ node.setSide(2);
+ tree.m_root = node.m_loc;
+ break;
+ } while (0);
+ tree.m_entryCount++;
}
/*
@@ -178,31 +182,36 @@ Dbtux::treeRemove(Frag& frag, TreePos treePos)
NodeHandle node(frag);
selectNode(node, treePos.m_loc);
TreeEnt ent;
- if (node.getOccup() > tree.m_minOccup) {
- // no underflow in any node type
- jam();
+ do {
+ if (node.getOccup() > tree.m_minOccup) {
+ // no underflow in any node type
+ jam();
+ nodePopDown(node, pos, ent, 0);
+ break;
+ }
+ if (node.getChilds() == 2) {
+ // underflow in interior node
+ jam();
+ treeRemoveInner(frag, node, pos);
+ break;
+ }
+ // remove entry in semi/leaf
nodePopDown(node, pos, ent, 0);
- return;
- }
- if (node.getChilds() == 2) {
- // underflow in interior node
- jam();
- treeRemoveInner(frag, node, pos);
- return;
- }
- // remove entry in semi/leaf
- nodePopDown(node, pos, ent, 0);
- if (node.getLink(0) != NullTupLoc) {
- jam();
- treeRemoveSemi(frag, node, 0);
- return;
- }
- if (node.getLink(1) != NullTupLoc) {
- jam();
- treeRemoveSemi(frag, node, 1);
- return;
- }
- treeRemoveLeaf(frag, node);
+ if (node.getLink(0) != NullTupLoc) {
+ jam();
+ treeRemoveSemi(frag, node, 0);
+ break;
+ }
+ if (node.getLink(1) != NullTupLoc) {
+ jam();
+ treeRemoveSemi(frag, node, 1);
+ break;
+ }
+ treeRemoveLeaf(frag, node);
+ break;
+ } while (0);
+ ndbrequire(tree.m_entryCount != 0);
+ tree.m_entryCount--;
}
/*
diff --git a/storage/ndb/src/kernel/blocks/dbtux/Makefile.am b/storage/ndb/src/kernel/blocks/dbtux/Makefile.am
index 12d450e8632..41eefaf0c3e 100644
--- a/storage/ndb/src/kernel/blocks/dbtux/Makefile.am
+++ b/storage/ndb/src/kernel/blocks/dbtux/Makefile.am
@@ -9,6 +9,7 @@ libdbtux_a_SOURCES = \
DbtuxScan.cpp \
DbtuxSearch.cpp \
DbtuxCmp.cpp \
+ DbtuxStat.cpp \
DbtuxDebug.cpp
INCLUDES_LOC = -I$(top_srcdir)/storage/ndb/src/kernel/blocks/dbtup
diff --git a/storage/ndb/src/ndbapi/Makefile.am b/storage/ndb/src/ndbapi/Makefile.am
index a4a0b8098a2..12590b6b8a5 100644
--- a/storage/ndb/src/ndbapi/Makefile.am
+++ b/storage/ndb/src/ndbapi/Makefile.am
@@ -35,6 +35,7 @@ libndbapi_la_SOURCES = \
DictCache.cpp \
ndb_cluster_connection.cpp \
NdbBlob.cpp \
+ NdbIndexStat.cpp \
SignalSender.cpp
INCLUDES_LOC = -I$(top_srcdir)/storage/ndb/src/mgmapi
diff --git a/storage/ndb/src/ndbapi/NdbDictionary.cpp b/storage/ndb/src/ndbapi/NdbDictionary.cpp
index 0d464c6d412..2a6cf07b2ca 100644
--- a/storage/ndb/src/ndbapi/NdbDictionary.cpp
+++ b/storage/ndb/src/ndbapi/NdbDictionary.cpp
@@ -1070,3 +1070,4 @@ const NdbDictionary::Column * NdbDictionary::Column::ROW_COUNT = 0;
const NdbDictionary::Column * NdbDictionary::Column::COMMIT_COUNT = 0;
const NdbDictionary::Column * NdbDictionary::Column::ROW_SIZE = 0;
const NdbDictionary::Column * NdbDictionary::Column::RANGE_NO = 0;
+const NdbDictionary::Column * NdbDictionary::Column::RECORDS_IN_RANGE = 0;
diff --git a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp
index 529ba09207b..f7b23200223 100644
--- a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp
+++ b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp
@@ -266,6 +266,11 @@ NdbColumnImpl::create_pseudo(const char * name){
col->m_impl.m_attrId = AttributeHeader::RANGE_NO;
col->m_impl.m_attrSize = 4;
col->m_impl.m_arraySize = 1;
+ } else if(!strcmp(name, "NDB$RECORDS_IN_RANGE")){
+ col->setType(NdbDictionary::Column::Unsigned);
+ col->m_impl.m_attrId = AttributeHeader::RECORDS_IN_RANGE;
+ col->m_impl.m_attrSize = 4;
+ col->m_impl.m_arraySize = 4;
} else {
abort();
}
@@ -739,12 +744,14 @@ NdbDictionaryImpl::~NdbDictionaryImpl()
delete NdbDictionary::Column::COMMIT_COUNT;
delete NdbDictionary::Column::ROW_SIZE;
delete NdbDictionary::Column::RANGE_NO;
+ delete NdbDictionary::Column::RECORDS_IN_RANGE;
NdbDictionary::Column::FRAGMENT= 0;
NdbDictionary::Column::FRAGMENT_MEMORY= 0;
NdbDictionary::Column::ROW_COUNT= 0;
NdbDictionary::Column::COMMIT_COUNT= 0;
NdbDictionary::Column::ROW_SIZE= 0;
NdbDictionary::Column::RANGE_NO= 0;
+ NdbDictionary::Column::RECORDS_IN_RANGE= 0;
}
m_globalHash->unlock();
} else {
@@ -817,6 +824,8 @@ NdbDictionaryImpl::setTransporter(class Ndb* ndb,
NdbColumnImpl::create_pseudo("NDB$ROW_SIZE");
NdbDictionary::Column::RANGE_NO=
NdbColumnImpl::create_pseudo("NDB$RANGE_NO");
+ NdbDictionary::Column::RECORDS_IN_RANGE=
+ NdbColumnImpl::create_pseudo("NDB$RECORDS_IN_RANGE");
}
m_globalHash->unlock();
return true;
@@ -2175,6 +2184,7 @@ NdbDictInterface::create_index_obj_from_table(NdbIndexImpl** dst,
}
* dst = idx;
+
return 0;
}
@@ -3209,4 +3219,3 @@ template class Vector<Uint32>;
template class Vector<Vector<Uint32> >;
template class Vector<NdbTableImpl*>;
template class Vector<NdbColumnImpl*>;
-
diff --git a/storage/ndb/src/ndbapi/NdbIndexStat.cpp b/storage/ndb/src/ndbapi/NdbIndexStat.cpp
new file mode 100644
index 00000000000..3f46d7909cd
--- /dev/null
+++ b/storage/ndb/src/ndbapi/NdbIndexStat.cpp
@@ -0,0 +1,490 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include <ndb_global.h>
+#include <AttributeHeader.hpp>
+#include <NdbSqlUtil.hpp>
+#include <NdbIndexStat.hpp>
+#include <NdbTransaction.hpp>
+#include <NdbIndexScanOperation.hpp>
+#include "NdbDictionaryImpl.hpp"
+#include <my_sys.h>
+
+NdbIndexStat::NdbIndexStat(const NdbDictionary::Index* index) :
+ m_index(index->m_impl),
+ m_cache(NULL)
+{
+}
+
+NdbIndexStat::~NdbIndexStat()
+{
+ delete [] m_cache;
+ m_cache = NULL;
+}
+
+int
+NdbIndexStat::alloc_cache(Uint32 entries)
+{
+ delete [] m_cache;
+ m_cache = NULL;
+ if (entries == 0) {
+ return 0;
+ }
+ Uint32 i;
+ Uint32 keysize = 0;
+ for (i = 0; i < m_index.m_columns.size(); i++) {
+ NdbColumnImpl* c = m_index.m_columns[i];
+ keysize += 2; // counting extra headers
+ keysize += (c->m_attrSize * c->m_arraySize + 3 ) / 4;
+ }
+ Uint32 areasize = entries * (PointerSize + EntrySize + keysize);
+ if (areasize > (1 << 16))
+ areasize = (1 << 16);
+ Uint32 cachesize = 2 * areasize;
+ m_cache = new Uint32 [cachesize];
+ if (m_cache == NULL) {
+ set_error(4000);
+ return -1;
+ }
+ m_areasize = areasize;
+ m_seq = 0;
+ Uint32 idir;
+ for (idir = 0; idir <= 1; idir++) {
+ Area& a = m_area[idir];
+ a.m_data = &m_cache[idir * areasize];
+ a.m_offset = a.m_data - &m_cache[0];
+ a.m_free = areasize;
+ a.m_entries = 0;
+ a.m_idir = idir;
+ a.pad1 = 0;
+ }
+#ifdef VM_TRACE
+ memset(&m_cache[0], 0x3f, cachesize << 2);
+#endif
+ return 0;
+}
+
+#ifndef VM_TRACE
+#define stat_verify()
+#else
+void
+NdbIndexStat::stat_verify()
+{
+ Uint32 idir;
+ for (idir = 0; idir <= 1; idir++) {
+ Uint32 i;
+ const Area& a = m_area[idir];
+ assert(a.m_offset == idir * m_areasize);
+ assert(a.m_data == &m_cache[a.m_offset]);
+ Uint32 pointerwords = PointerSize * a.m_entries;
+ Uint32 entrywords = 0;
+ for (i = 0; i < a.m_entries; i++) {
+ const Pointer& p = a.get_pointer(i);
+ const Entry& e = a.get_entry(i);
+ assert(a.get_pos(e) == p.m_pos);
+ entrywords += EntrySize + e.m_keylen;
+ }
+ assert(a.m_free <= m_areasize);
+ assert(pointerwords + a.m_free + entrywords == m_areasize);
+ Uint32 off = pointerwords + a.m_free;
+ for (i = 0; i < a.m_entries; i++) {
+ assert(off < m_areasize);
+ const Entry& e = *(const Entry*)&a.m_data[off];
+ off += EntrySize + e.m_keylen;
+ }
+ assert(off == m_areasize);
+ for (i = 0; i < a.m_entries; i++) {
+ const Entry& e = a.get_entry(i);
+ const Uint32* entrykey = (const Uint32*)&e + EntrySize;
+ Uint32 n = 0;
+ while (n + 2 <= e.m_keylen) {
+ Uint32 t = entrykey[n++];
+ assert(t == 2 * idir || t == 2 * idir + 1 || t == 4);
+ AttributeHeader ah = *(const AttributeHeader*)&entrykey[n++];
+ n += ah.getDataSize();
+ }
+ assert(n == e.m_keylen);
+ }
+ for (i = 0; i + 1 < a.m_entries; i++) {
+ const Entry& e1 = a.get_entry(i);
+ const Entry& e2 = a.get_entry(i + 1);
+ const Uint32* entrykey1 = (const Uint32*)&e1 + EntrySize;
+ const Uint32* entrykey2 = (const Uint32*)&e2 + EntrySize;
+ int ret = stat_cmpkey(a, entrykey1, e1.m_keylen, entrykey2, e2.m_keylen);
+ assert(ret == -1);
+ }
+ }
+}
+#endif
+
+// compare keys
+int
+NdbIndexStat::stat_cmpkey(const Area& a, const Uint32* key1, Uint32 keylen1, const Uint32* key2, Uint32 keylen2)
+{
+ const Uint32 idir = a.m_idir;
+ const int jdir = 1 - 2 * int(idir);
+ Uint32 i1 = 0, i2 = 0;
+ Uint32 t1 = 4, t2 = 4; //BoundEQ
+ int ret = 0;
+ Uint32 k = 0;
+ while (k < m_index.m_columns.size()) {
+ NdbColumnImpl* c = m_index.m_columns[k];
+ Uint32 n = c->m_attrSize * c->m_arraySize;
+ // absence of keypart is treated specially
+ bool havekp1 = (i1 + 2 <= keylen1);
+ bool havekp2 = (i2 + 2 <= keylen2);
+ AttributeHeader ah1;
+ AttributeHeader ah2;
+ if (havekp1) {
+ t1 = key1[i1++];
+ assert(t1 == 2 * idir || t1 == 2 * idir + 1 || t1 == 4);
+ ah1 = *(const AttributeHeader*)&key1[i1++];
+ }
+ if (havekp2) {
+ t2 = key2[i2++];
+ assert(t2 == 2 * idir || t2 == 2 * idir + 1 || t2 == 4);
+ ah2 = *(const AttributeHeader*)&key2[i2++];
+ }
+ if (havekp1) {
+ if (havekp2) {
+ if (! ah1.isNULL()) {
+ if (! ah2.isNULL()) {
+ const NdbSqlUtil::Type& sqlType = NdbSqlUtil::getType(c->m_type);
+ ret = (*sqlType.m_cmp)(c->m_cs, &key1[i1], n, &key2[i2], n, true);
+ if (ret != 0)
+ break;
+ } else {
+ ret = +1;
+ break;
+ }
+ } else if (! ah2.isNULL()) {
+ ret = -1;
+ break;
+ }
+ } else {
+ ret = +jdir;
+ break;
+ }
+ } else {
+ if (havekp2) {
+ ret = -jdir;
+ break;
+ } else {
+ // no more keyparts on either side
+ break;
+ }
+ }
+ i1 += ah1.getDataSize();
+ i2 += ah2.getDataSize();
+ k++;
+ }
+ if (ret == 0) {
+ // strict bound is greater as start key and less as end key
+ int s1 = t1 & 1;
+ int s2 = t2 & 1;
+ ret = (s1 - s2) * jdir;
+ }
+ return ret;
+}
+
+// find first key >= given key
+int
+NdbIndexStat::stat_search(const Area& a, const Uint32* key, Uint32 keylen, Uint32* idx, bool* match)
+{
+ // points at minus/plus infinity
+ int lo = -1;
+ int hi = a.m_entries;
+ // loop invariant: key(lo) < key < key(hi)
+ while (hi - lo > 1) {
+ // observe lo < j < hi
+ int j = (hi + lo) / 2;
+ Entry& e = a.get_entry(j);
+ const Uint32* key2 = (Uint32*)&e + EntrySize;
+ Uint32 keylen2 = e.m_keylen;
+ int ret = stat_cmpkey(a, key, keylen, key2, keylen2);
+ // observe the loop invariant if ret != 0
+ if (ret < 0)
+ hi = j;
+ else if (ret > 0)
+ lo = j;
+ else {
+ *idx = j;
+ *match = true;
+ return 0;
+ }
+ }
+ // hi - lo == 1 and key(lo) < key < key(hi)
+ *idx = hi;
+ *match = false;
+ return 0;
+}
+
+// find oldest entry
+int
+NdbIndexStat::stat_oldest(const Area& a)
+{
+  Uint32 i, k = 0, m = 0;
+ bool found = false;
+ for (i = 0; i < a.m_entries; i++) {
+ Pointer& p = a.get_pointer(i);
+ Entry& e = a.get_entry(i);
+ Uint32 m2 = m_seq >= p.m_seq ? m_seq - p.m_seq : p.m_seq - m_seq;
+ if (! found || m < m2) {
+ m = m2;
+ k = i;
+ found = true;
+ }
+ }
+ assert(found);
+ return k;
+}
+
+// delete entry
+int
+NdbIndexStat::stat_delete(Area& a, Uint32 k)
+{
+ Uint32 i;
+ NdbIndexStat::Entry& e = a.get_entry(k);
+ Uint32 entrylen = EntrySize + e.m_keylen;
+ Uint32 pos = a.get_pos(e);
+ // adjust pointers to entries after
+ for (i = 0; i < a.m_entries; i++) {
+ Pointer& p = a.get_pointer(i);
+ if (p.m_pos < pos) {
+ p.m_pos += entrylen;
+ }
+ }
+ // compact entry area
+ unsigned firstpos = a.get_firstpos();
+ for (i = pos; i > firstpos; i--) {
+ a.m_data[i + entrylen - 1] = a.m_data[i - 1];
+ }
+ // compact pointer area
+ for (i = k; i + 1 < a.m_entries; i++) {
+ NdbIndexStat::Pointer& p = a.get_pointer(i);
+ NdbIndexStat::Pointer& q = a.get_pointer(i + 1);
+ p = q;
+ }
+ a.m_free += PointerSize + entrylen;
+ a.m_entries--;
+ stat_verify();
+ return 0;
+}
+
+// update or insert stat values
+int
+NdbIndexStat::stat_update(const Uint32* key1, Uint32 keylen1, const Uint32* key2, Uint32 keylen2, const float pct[2])
+{
+ const Uint32* const key[2] = { key1, key2 };
+ const Uint32 keylen[2] = { keylen1, keylen2 };
+ Uint32 idir;
+ for (idir = 0; idir <= 1; idir++) {
+ Area& a = m_area[idir];
+ Uint32 k;
+ bool match;
+ stat_search(a, key[idir], keylen[idir], &k, &match);
+ Uint16 seq = m_seq++;
+ if (match) {
+ // update old entry
+ NdbIndexStat::Pointer& p = a.get_pointer(k);
+ NdbIndexStat::Entry& e = a.get_entry(k);
+ e.m_pct = pct[idir];
+ p.m_seq = seq;
+ } else {
+ Uint32 entrylen = NdbIndexStat::EntrySize + keylen[idir];
+ Uint32 need = NdbIndexStat::PointerSize + entrylen;
+ while (need > a.m_free) {
+ Uint32 j = stat_oldest(a);
+ if (j < k)
+ k--;
+ stat_delete(a, j);
+ }
+ // insert pointer
+ Uint32 i;
+ for (i = a.m_entries; i > k; i--) {
+ NdbIndexStat::Pointer& p1 = a.get_pointer(i);
+ NdbIndexStat::Pointer& p2 = a.get_pointer(i - 1);
+ p1 = p2;
+ }
+ NdbIndexStat::Pointer& p = a.get_pointer(k);
+ // insert entry
+ Uint32 firstpos = a.get_firstpos();
+ p.m_pos = firstpos - entrylen;
+ NdbIndexStat::Entry& e = a.get_entry(k);
+ e.m_pct = pct[idir];
+ e.m_keylen = keylen[idir];
+ Uint32* entrykey = (Uint32*)&e + EntrySize;
+ for (i = 0; i < keylen[idir]; i++) {
+ entrykey[i] = key[idir][i];
+ }
+ p.m_seq = seq;
+ // total
+ a.m_free -= PointerSize + entrylen;
+ a.m_entries++;
+ }
+ }
+ stat_verify();
+ return 0;
+}
+
+int
+NdbIndexStat::stat_select(const Uint32* key1, Uint32 keylen1, const Uint32* key2, Uint32 keylen2, float pct[2])
+{
+ const Uint32* const key[2] = { key1, key2 };
+ const Uint32 keylen[2] = { keylen1, keylen2 };
+ Uint32 idir;
+ for (idir = 0; idir <= 1; idir++) {
+ Area& a = m_area[idir];
+ Uint32 k;
+ bool match;
+ stat_search(a, key[idir], keylen[idir], &k, &match);
+ if (match) {
+ NdbIndexStat::Entry& e = a.get_entry(k);
+ pct[idir] = e.m_pct;
+ } else if (k == 0) {
+ NdbIndexStat::Entry& e = a.get_entry(k);
+ if (idir == 0)
+ pct[idir] = e.m_pct / 2;
+ else
+        pct[idir] = e.m_pct + (100 - e.m_pct) / 2;
+ } else if (k == a.m_entries) {
+ NdbIndexStat::Entry& e = a.get_entry(k - 1);
+ if (idir == 0)
+        pct[idir] = e.m_pct + (100 - e.m_pct) / 2;
+ else
+ pct[idir] = e.m_pct / 2;
+ } else {
+ NdbIndexStat::Entry& e1 = a.get_entry(k - 1);
+ NdbIndexStat::Entry& e2 = a.get_entry(k);
+ pct[idir] = (e1.m_pct + e2.m_pct) / 2;
+ }
+ }
+ return 0;
+}
+
+int
+NdbIndexStat::records_in_range(NdbDictionary::Index* index, NdbIndexScanOperation* op, Uint64 table_rows, Uint64* count, int flags)
+{
+ DBUG_ENTER("NdbIndexStat::records_in_range");
+ Uint64 rows;
+ Uint32 key1[1000], keylen1;
+ Uint32 key2[1000], keylen2;
+
+ if (m_cache == NULL)
+ flags |= RR_UseDb | RR_NoUpdate;
+ else if (m_area[0].m_entries == 0 || m_area[1].m_entries == 0)
+ flags |= RR_UseDb;
+
+  if ((flags & (RR_UseDb | RR_NoUpdate)) != (RR_UseDb | RR_NoUpdate)) {
+ // get start and end key - assume bound is ordered, wellformed
+ Uint32 bound[1000];
+ Uint32 boundlen = op->getKeyFromSCANTABREQ(bound, 1000);
+
+ keylen1 = keylen2 = 0;
+ Uint32 n = 0;
+ while (n < boundlen) {
+ Uint32 t = bound[n];
+ AttributeHeader ah(bound[n + 1]);
+ Uint32 sz = 2 + ah.getDataSize();
+ t &= 0xFFFF; // may contain length
+ assert(t <= 4);
+ bound[n] = t;
+ if (t == 0 || t == 1 || t == 4) {
+ memcpy(&key1[keylen1], &bound[n], sz << 2);
+ keylen1 += sz;
+ }
+ if (t == 2 || t == 3 || t == 4) {
+ memcpy(&key2[keylen2], &bound[n], sz << 2);
+ keylen2 += sz;
+ }
+ n += sz;
+ }
+ }
+
+ if (flags & RR_UseDb) {
+ Uint32 out[4] = { 0, 0, 0, 0 }; // rows, in, before, after
+ float tot[4] = { 0, 0, 0, 0 }; // totals of above
+ int cnt, ret;
+ bool forceSend = true;
+ NdbTransaction* trans = op->m_transConnection;
+ if (op->interpret_exit_last_row() == -1 ||
+ op->getValue(NdbDictionary::Column::RECORDS_IN_RANGE, (char*)out) == 0) {
+ DBUG_PRINT("error", ("op:%d", op->getNdbError().code));
+ DBUG_RETURN(-1);
+ }
+ if (trans->execute(NdbTransaction::NoCommit,
+ NdbTransaction::AbortOnError, forceSend) == -1) {
+ DBUG_PRINT("error", ("trans:%d op:%d", trans->getNdbError().code,
+ op->getNdbError().code));
+ DBUG_RETURN(-1);
+ }
+ cnt = 0;
+ while ((ret = op->nextResult(true, forceSend)) == 0) {
+ DBUG_PRINT("info", ("frag rows=%u in=%u before=%u after=%u [error=%d]",
+ out[0], out[1], out[2], out[3],
+ (int)(out[1] + out[2] + out[3]) - (int)out[0]));
+ unsigned i;
+ for (i = 0; i < 4; i++)
+ tot[i] += (float)out[i];
+ cnt++;
+ }
+ if (ret == -1) {
+ DBUG_PRINT("error", ("trans:%d op:%d", trans->getNdbError().code,
+ op->getNdbError().code));
+ DBUG_RETURN(-1);
+ }
+ op->close(forceSend);
+ rows = (Uint64)tot[1];
+ if (cnt != 0 && ! (flags & RR_NoUpdate)) {
+ float pct[2];
+ pct[0] = 100 * tot[2] / tot[0];
+ pct[1] = 100 * tot[3] / tot[0];
+ DBUG_PRINT("info", ("update stat pct"
+ " before=%.2f after=%.2f",
+ pct[0], pct[1]));
+ stat_update(key1, keylen1, key2, keylen2, pct);
+ }
+ } else {
+ float pct[2];
+ stat_select(key1, keylen1, key2, keylen2, pct);
+ float diff = 100.0 - (pct[0] + pct[1]);
+ float trows = (float)table_rows;
+ DBUG_PRINT("info", ("select stat pct"
+ " before=%.2f after=%.2f in=%.2f table_rows=%.2f",
+ pct[0], pct[1], diff, trows));
+ rows = 0;
+ if (diff >= 0)
+ rows = (Uint64)(diff * trows / 100);
+ if (rows == 0)
+ rows = 1;
+ }
+
+ *count = rows;
+ DBUG_PRINT("value", ("rows=%llu flags=%o", rows, flags));
+ DBUG_RETURN(0);
+}
+
+void
+NdbIndexStat::set_error(int code)
+{
+ m_error.code = code;
+}
+
+const NdbError&
+NdbIndexStat::getNdbError() const
+{
+ return m_error;
+}
diff --git a/storage/ndb/src/ndbapi/NdbScanOperation.cpp b/storage/ndb/src/ndbapi/NdbScanOperation.cpp
index 288b8dc8bd8..cd2a46e7f7c 100644
--- a/storage/ndb/src/ndbapi/NdbScanOperation.cpp
+++ b/storage/ndb/src/ndbapi/NdbScanOperation.cpp
@@ -1198,6 +1198,31 @@ error:
return -1;
}
+Uint32
+NdbIndexScanOperation::getKeyFromSCANTABREQ(Uint32* data, Uint32 size)
+{
+ DBUG_ENTER("NdbIndexScanOperation::getKeyFromSCANTABREQ");
+ assert(size >= theTotalNrOfKeyWordInSignal);
+ size = theTotalNrOfKeyWordInSignal;
+ NdbApiSignal* tSignal = theSCAN_TABREQ->next();
+ Uint32 pos = 0;
+ while (pos < size) {
+ assert(tSignal != NULL);
+ Uint32* tData = tSignal->getDataPtrSend();
+ Uint32 rem = size - pos;
+ if (rem > KeyInfo::DataLength)
+ rem = KeyInfo::DataLength;
+ Uint32 i = 0;
+ while (i < rem) {
+ data[pos + i] = tData[KeyInfo::HeaderLength + i];
+ i++;
+ }
+ pos += rem;
+ }
+ DBUG_DUMP("key", (char*)data, size << 2);
+ DBUG_RETURN(size);
+}
+
int
NdbIndexScanOperation::readTuples(LockMode lm,
Uint32 scan_flags,